Gilbert Brault преди 5 години
родител
ревизия
9b4eab36ff
променени са 100 файла, в които са добавени 22419 реда и са изтрити 1 реда
  1. 1 0
      .gitignore
  2. 1 1
      .vscode/settings.json
  3. 1 0
      Lib/site-packages/Jinja2-2.11.2.dist-info/INSTALLER
  4. 28 0
      Lib/site-packages/Jinja2-2.11.2.dist-info/LICENSE.rst
  5. 106 0
      Lib/site-packages/Jinja2-2.11.2.dist-info/METADATA
  6. 61 0
      Lib/site-packages/Jinja2-2.11.2.dist-info/RECORD
  7. 6 0
      Lib/site-packages/Jinja2-2.11.2.dist-info/WHEEL
  8. 3 0
      Lib/site-packages/Jinja2-2.11.2.dist-info/entry_points.txt
  9. 1 0
      Lib/site-packages/Jinja2-2.11.2.dist-info/top_level.txt
  10. 1 0
      Lib/site-packages/Markdown-3.2.2.dist-info/INSTALLER
  11. 29 0
      Lib/site-packages/Markdown-3.2.2.dist-info/LICENSE.md
  12. 57 0
      Lib/site-packages/Markdown-3.2.2.dist-info/METADATA
  13. 74 0
      Lib/site-packages/Markdown-3.2.2.dist-info/RECORD
  14. 5 0
      Lib/site-packages/Markdown-3.2.2.dist-info/WHEEL
  15. 23 0
      Lib/site-packages/Markdown-3.2.2.dist-info/entry_points.txt
  16. 1 0
      Lib/site-packages/Markdown-3.2.2.dist-info/top_level.txt
  17. 1 0
      Lib/site-packages/MarkupSafe-1.1.1.dist-info/INSTALLER
  18. 28 0
      Lib/site-packages/MarkupSafe-1.1.1.dist-info/LICENSE.rst
  19. 105 0
      Lib/site-packages/MarkupSafe-1.1.1.dist-info/METADATA
  20. 15 0
      Lib/site-packages/MarkupSafe-1.1.1.dist-info/RECORD
  21. 5 0
      Lib/site-packages/MarkupSafe-1.1.1.dist-info/WHEEL
  22. 1 0
      Lib/site-packages/MarkupSafe-1.1.1.dist-info/top_level.txt
  23. 1 0
      Lib/site-packages/PyYAML-5.3.1.dist-info/INSTALLER
  24. 20 0
      Lib/site-packages/PyYAML-5.3.1.dist-info/LICENSE
  25. 40 0
      Lib/site-packages/PyYAML-5.3.1.dist-info/METADATA
  26. 40 0
      Lib/site-packages/PyYAML-5.3.1.dist-info/RECORD
  27. 5 0
      Lib/site-packages/PyYAML-5.3.1.dist-info/WHEEL
  28. 231 0
      Lib/site-packages/Pygments-2.6.1.dist-info/AUTHORS
  29. 1 0
      Lib/site-packages/Pygments-2.6.1.dist-info/INSTALLER
  30. 25 0
      Lib/site-packages/Pygments-2.6.1.dist-info/LICENSE
  31. 48 0
      Lib/site-packages/Pygments-2.6.1.dist-info/METADATA
  32. 463 0
      Lib/site-packages/Pygments-2.6.1.dist-info/RECORD
  33. 5 0
      Lib/site-packages/Pygments-2.6.1.dist-info/WHEEL
  34. 3 0
      Lib/site-packages/Pygments-2.6.1.dist-info/entry_points.txt
  35. 1 0
      Lib/site-packages/Pygments-2.6.1.dist-info/top_level.txt
  36. BIN
      Lib/site-packages/_yaml.cp38-win_amd64.pyd
  37. 1 0
      Lib/site-packages/click-7.1.2.dist-info/INSTALLER
  38. 28 0
      Lib/site-packages/click-7.1.2.dist-info/LICENSE.rst
  39. 102 0
      Lib/site-packages/click-7.1.2.dist-info/METADATA
  40. 40 0
      Lib/site-packages/click-7.1.2.dist-info/RECORD
  41. 6 0
      Lib/site-packages/click-7.1.2.dist-info/WHEEL
  42. 1 0
      Lib/site-packages/click-7.1.2.dist-info/top_level.txt
  43. 79 0
      Lib/site-packages/click/__init__.py
  44. 375 0
      Lib/site-packages/click/_bashcomplete.py
  45. 786 0
      Lib/site-packages/click/_compat.py
  46. 657 0
      Lib/site-packages/click/_termui_impl.py
  47. 37 0
      Lib/site-packages/click/_textwrap.py
  48. 131 0
      Lib/site-packages/click/_unicodefun.py
  49. 370 0
      Lib/site-packages/click/_winconsole.py
  50. 2030 0
      Lib/site-packages/click/core.py
  51. 333 0
      Lib/site-packages/click/decorators.py
  52. 253 0
      Lib/site-packages/click/exceptions.py
  53. 283 0
      Lib/site-packages/click/formatting.py
  54. 47 0
      Lib/site-packages/click/globals.py
  55. 428 0
      Lib/site-packages/click/parser.py
  56. 681 0
      Lib/site-packages/click/termui.py
  57. 382 0
      Lib/site-packages/click/testing.py
  58. 762 0
      Lib/site-packages/click/types.py
  59. 455 0
      Lib/site-packages/click/utils.py
  60. 5 0
      Lib/site-packages/easy_install.py
  61. 1 0
      Lib/site-packages/future-0.18.2.dist-info/INSTALLER
  62. 19 0
      Lib/site-packages/future-0.18.2.dist-info/LICENSE.txt
  63. 109 0
      Lib/site-packages/future-0.18.2.dist-info/METADATA
  64. 415 0
      Lib/site-packages/future-0.18.2.dist-info/RECORD
  65. 5 0
      Lib/site-packages/future-0.18.2.dist-info/WHEEL
  66. 4 0
      Lib/site-packages/future-0.18.2.dist-info/entry_points.txt
  67. 4 0
      Lib/site-packages/future-0.18.2.dist-info/top_level.txt
  68. 93 0
      Lib/site-packages/future/__init__.py
  69. 26 0
      Lib/site-packages/future/backports/__init__.py
  70. 422 0
      Lib/site-packages/future/backports/_markupbase.py
  71. 2152 0
      Lib/site-packages/future/backports/datetime.py
  72. 78 0
      Lib/site-packages/future/backports/email/__init__.py
  73. 232 0
      Lib/site-packages/future/backports/email/_encoded_words.py
  74. 2965 0
      Lib/site-packages/future/backports/email/_header_value_parser.py
  75. 546 0
      Lib/site-packages/future/backports/email/_parseaddr.py
  76. 365 0
      Lib/site-packages/future/backports/email/_policybase.py
  77. 120 0
      Lib/site-packages/future/backports/email/base64mime.py
  78. 409 0
      Lib/site-packages/future/backports/email/charset.py
  79. 90 0
      Lib/site-packages/future/backports/email/encoders.py
  80. 111 0
      Lib/site-packages/future/backports/email/errors.py
  81. 525 0
      Lib/site-packages/future/backports/email/feedparser.py
  82. 498 0
      Lib/site-packages/future/backports/email/generator.py
  83. 581 0
      Lib/site-packages/future/backports/email/header.py
  84. 592 0
      Lib/site-packages/future/backports/email/headerregistry.py
  85. 74 0
      Lib/site-packages/future/backports/email/iterators.py
  86. 882 0
      Lib/site-packages/future/backports/email/message.py
  87. 0 0
      Lib/site-packages/future/backports/email/mime/__init__.py
  88. 39 0
      Lib/site-packages/future/backports/email/mime/application.py
  89. 74 0
      Lib/site-packages/future/backports/email/mime/audio.py
  90. 25 0
      Lib/site-packages/future/backports/email/mime/base.py
  91. 48 0
      Lib/site-packages/future/backports/email/mime/image.py
  92. 36 0
      Lib/site-packages/future/backports/email/mime/message.py
  93. 49 0
      Lib/site-packages/future/backports/email/mime/multipart.py
  94. 24 0
      Lib/site-packages/future/backports/email/mime/nonmultipart.py
  95. 44 0
      Lib/site-packages/future/backports/email/mime/text.py
  96. 135 0
      Lib/site-packages/future/backports/email/parser.py
  97. 193 0
      Lib/site-packages/future/backports/email/policy.py
  98. 326 0
      Lib/site-packages/future/backports/email/quoprimime.py
  99. 400 0
      Lib/site-packages/future/backports/email/utils.py
  100. 0 0
      Lib/site-packages/future/backports/html/__init__.py

+ 1 - 0
.gitignore

@@ -39,3 +39,4 @@ _minted-*
 .*.copyright
 # notebook checkepoints
 *checkpoints
+Scripts

+ 1 - 1
.vscode/settings.json

@@ -1,4 +1,4 @@
 {
-    "python.pythonPath": "C:\\Python38\\python.exe",
+    "python.pythonPath": "c:\\Python38\\python.exe",
     "python.dataScience.jupyterServerURI": "local"
 }

+ 1 - 0
Lib/site-packages/Jinja2-2.11.2.dist-info/INSTALLER

@@ -0,0 +1 @@
+pip

+ 28 - 0
Lib/site-packages/Jinja2-2.11.2.dist-info/LICENSE.rst

@@ -0,0 +1,28 @@
+Copyright 2007 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1.  Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+2.  Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+3.  Neither the name of the copyright holder nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 106 - 0
Lib/site-packages/Jinja2-2.11.2.dist-info/METADATA

@@ -0,0 +1,106 @@
+Metadata-Version: 2.1
+Name: Jinja2
+Version: 2.11.2
+Summary: A very fast and expressive template engine.
+Home-page: https://palletsprojects.com/p/jinja/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+Maintainer: Pallets
+Maintainer-email: contact@palletsprojects.com
+License: BSD-3-Clause
+Project-URL: Documentation, https://jinja.palletsprojects.com/
+Project-URL: Code, https://github.com/pallets/jinja
+Project-URL: Issue tracker, https://github.com/pallets/jinja/issues
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
+Description-Content-Type: text/x-rst
+Requires-Dist: MarkupSafe (>=0.23)
+Provides-Extra: i18n
+Requires-Dist: Babel (>=0.8) ; extra == 'i18n'
+
+Jinja
+=====
+
+Jinja is a fast, expressive, extensible templating engine. Special
+placeholders in the template allow writing code similar to Python
+syntax. Then the template is passed data to render the final document.
+
+It includes:
+
+-   Template inheritance and inclusion.
+-   Define and import macros within templates.
+-   HTML templates can use autoescaping to prevent XSS from untrusted
+    user input.
+-   A sandboxed environment can safely render untrusted templates.
+-   AsyncIO support for generating templates and calling async
+    functions.
+-   I18N support with Babel.
+-   Templates are compiled to optimized Python code just-in-time and
+    cached, or can be compiled ahead-of-time.
+-   Exceptions point to the correct line in templates to make debugging
+    easier.
+-   Extensible filters, tests, functions, and even syntax.
+
+Jinja's philosophy is that while application logic belongs in Python if
+possible, it shouldn't make the template designer's job difficult by
+restricting functionality too much.
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+    $ pip install -U Jinja2
+
+.. _pip: https://pip.pypa.io/en/stable/quickstart/
+
+
+In A Nutshell
+-------------
+
+.. code-block:: jinja
+
+    {% extends "base.html" %}
+    {% block title %}Members{% endblock %}
+    {% block content %}
+      <ul>
+      {% for user in users %}
+        <li><a href="{{ user.url }}">{{ user.username }}</a></li>
+      {% endfor %}
+      </ul>
+    {% endblock %}
+
+
+Links
+-----
+
+-   Website: https://palletsprojects.com/p/jinja/
+-   Documentation: https://jinja.palletsprojects.com/
+-   Releases: https://pypi.org/project/Jinja2/
+-   Code: https://github.com/pallets/jinja
+-   Issue tracker: https://github.com/pallets/jinja/issues
+-   Test status: https://dev.azure.com/pallets/jinja/_build
+-   Official chat: https://discord.gg/t6rrQZH
+
+

+ 61 - 0
Lib/site-packages/Jinja2-2.11.2.dist-info/RECORD

@@ -0,0 +1,61 @@
+Jinja2-2.11.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+Jinja2-2.11.2.dist-info/LICENSE.rst,sha256=O0nc7kEF6ze6wQ-vG-JgQI_oXSUrjp3y4JefweCUQ3s,1475
+Jinja2-2.11.2.dist-info/METADATA,sha256=5ZHRZoIRAMHsJPnqhlJ622_dRPsYePYJ-9EH4-Ry7yI,3535
+Jinja2-2.11.2.dist-info/RECORD,,
+Jinja2-2.11.2.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+Jinja2-2.11.2.dist-info/entry_points.txt,sha256=Qy_DkVo6Xj_zzOtmErrATe8lHZhOqdjpt3e4JJAGyi8,61
+Jinja2-2.11.2.dist-info/top_level.txt,sha256=PkeVWtLb3-CqjWi1fO29OCbj55EhX_chhKrCdrVe_zs,7
+jinja2/__init__.py,sha256=0QCM_jKKDM10yzSdHRVV4mQbCbDqf0GN0GirAqibn9Y,1549
+jinja2/__pycache__/__init__.cpython-38.pyc,,
+jinja2/__pycache__/_compat.cpython-38.pyc,,
+jinja2/__pycache__/_identifier.cpython-38.pyc,,
+jinja2/__pycache__/asyncfilters.cpython-38.pyc,,
+jinja2/__pycache__/asyncsupport.cpython-38.pyc,,
+jinja2/__pycache__/bccache.cpython-38.pyc,,
+jinja2/__pycache__/compiler.cpython-38.pyc,,
+jinja2/__pycache__/constants.cpython-38.pyc,,
+jinja2/__pycache__/debug.cpython-38.pyc,,
+jinja2/__pycache__/defaults.cpython-38.pyc,,
+jinja2/__pycache__/environment.cpython-38.pyc,,
+jinja2/__pycache__/exceptions.cpython-38.pyc,,
+jinja2/__pycache__/ext.cpython-38.pyc,,
+jinja2/__pycache__/filters.cpython-38.pyc,,
+jinja2/__pycache__/idtracking.cpython-38.pyc,,
+jinja2/__pycache__/lexer.cpython-38.pyc,,
+jinja2/__pycache__/loaders.cpython-38.pyc,,
+jinja2/__pycache__/meta.cpython-38.pyc,,
+jinja2/__pycache__/nativetypes.cpython-38.pyc,,
+jinja2/__pycache__/nodes.cpython-38.pyc,,
+jinja2/__pycache__/optimizer.cpython-38.pyc,,
+jinja2/__pycache__/parser.cpython-38.pyc,,
+jinja2/__pycache__/runtime.cpython-38.pyc,,
+jinja2/__pycache__/sandbox.cpython-38.pyc,,
+jinja2/__pycache__/tests.cpython-38.pyc,,
+jinja2/__pycache__/utils.cpython-38.pyc,,
+jinja2/__pycache__/visitor.cpython-38.pyc,,
+jinja2/_compat.py,sha256=B6Se8HjnXVpzz9-vfHejn-DV2NjaVK-Iewupc5kKlu8,3191
+jinja2/_identifier.py,sha256=EdgGJKi7O1yvr4yFlvqPNEqV6M1qHyQr8Gt8GmVTKVM,1775
+jinja2/asyncfilters.py,sha256=XJtYXTxFvcJ5xwk6SaDL4S0oNnT0wPYvXBCSzc482fI,4250
+jinja2/asyncsupport.py,sha256=ZBFsDLuq3Gtji3Ia87lcyuDbqaHZJRdtShZcqwpFnSQ,7209
+jinja2/bccache.py,sha256=3Pmp4jo65M9FQuIxdxoDBbEDFwe4acDMQf77nEJfrHA,12139
+jinja2/compiler.py,sha256=Ta9W1Lit542wItAHXlDcg0sEOsFDMirCdlFPHAurg4o,66284
+jinja2/constants.py,sha256=RR1sTzNzUmKco6aZicw4JpQpJGCuPuqm1h1YmCNUEFY,1458
+jinja2/debug.py,sha256=neR7GIGGjZH3_ILJGVUYy3eLQCCaWJMXOb7o0kGInWc,8529
+jinja2/defaults.py,sha256=85B6YUUCyWPSdrSeVhcqFVuu_bHUAQXeey--FIwSeVQ,1126
+jinja2/environment.py,sha256=XDSLKc4SqNLMOwTSq3TbWEyA5WyXfuLuVD0wAVjEFwM,50629
+jinja2/exceptions.py,sha256=VjNLawcmf2ODffqVMCQK1cRmvFaUfQWF4u8ouP3QPcE,5425
+jinja2/ext.py,sha256=AtwL5O5enT_L3HR9-oBvhGyUTdGoyaqG_ICtnR_EVd4,26441
+jinja2/filters.py,sha256=_RpPgAlgIj7ExvyDzcHAC3B36cocfWK-1TEketbNeM0,41415
+jinja2/idtracking.py,sha256=J3O4VHsrbf3wzwiBc7Cro26kHb6_5kbULeIOzocchIU,9211
+jinja2/lexer.py,sha256=nUFLRKhhKmmEWkLI65nQePgcQs7qsRdjVYZETMt_v0g,30331
+jinja2/loaders.py,sha256=C-fST_dmFjgWkp0ZuCkrgICAoOsoSIF28wfAFink0oU,17666
+jinja2/meta.py,sha256=QjyYhfNRD3QCXjBJpiPl9KgkEkGXJbAkCUq4-Ur10EQ,4131
+jinja2/nativetypes.py,sha256=Ul__gtVw4xH-0qvUvnCNHedQeNDwmEuyLJztzzSPeRg,2753
+jinja2/nodes.py,sha256=Mk1oJPVgIjnQw9WOqILvcu3rLepcFZ0ahxQm2mbwDwc,31095
+jinja2/optimizer.py,sha256=gQLlMYzvQhluhzmAIFA1tXS0cwgWYOjprN-gTRcHVsc,1457
+jinja2/parser.py,sha256=fcfdqePNTNyvosIvczbytVA332qpsURvYnCGcjDHSkA,35660
+jinja2/runtime.py,sha256=0y-BRyIEZ9ltByL2Id6GpHe1oDRQAwNeQvI0SKobNMw,30618
+jinja2/sandbox.py,sha256=knayyUvXsZ-F0mk15mO2-ehK9gsw04UhB8td-iUOtLc,17127
+jinja2/tests.py,sha256=iO_Y-9Vo60zrVe1lMpSl5sKHqAxe2leZHC08OoZ8K24,4799
+jinja2/utils.py,sha256=OoVMlQe9S2-lWT6jJbTu9tDuDvGNyWUhHDcE51i5_Do,22522
+jinja2/visitor.py,sha256=DUHupl0a4PGp7nxRtZFttUzAi1ccxzqc2hzetPYUz8U,3240

+ 6 - 0
Lib/site-packages/Jinja2-2.11.2.dist-info/WHEEL

@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.34.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+

+ 3 - 0
Lib/site-packages/Jinja2-2.11.2.dist-info/entry_points.txt

@@ -0,0 +1,3 @@
+[babel.extractors]
+jinja2 = jinja2.ext:babel_extract [i18n]
+

+ 1 - 0
Lib/site-packages/Jinja2-2.11.2.dist-info/top_level.txt

@@ -0,0 +1 @@
+jinja2

+ 1 - 0
Lib/site-packages/Markdown-3.2.2.dist-info/INSTALLER

@@ -0,0 +1 @@
+pip

+ 29 - 0
Lib/site-packages/Markdown-3.2.2.dist-info/LICENSE.md

@@ -0,0 +1,29 @@
+Copyright 2007, 2008 The Python Markdown Project (v. 1.7 and later)
+Copyright 2004, 2005, 2006 Yuri Takhteyev (v. 0.2-1.6b)
+Copyright 2004 Manfred Stienstra (the original version)
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+* Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer in the
+  documentation and/or other materials provided with the distribution.
+* Neither the name of the Python Markdown Project nor the
+  names of its contributors may be used to endorse or promote products
+  derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE PYTHON MARKDOWN PROJECT ''AS IS'' AND ANY
+EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL ANY CONTRIBUTORS TO THE PYTHON MARKDOWN PROJECT
+BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.

+ 57 - 0
Lib/site-packages/Markdown-3.2.2.dist-info/METADATA

@@ -0,0 +1,57 @@
+Metadata-Version: 2.1
+Name: Markdown
+Version: 3.2.2
+Summary: Python implementation of Markdown.
+Home-page: https://Python-Markdown.github.io/
+Author: Manfred Stienstra, Yuri takhteyev and Waylan limberg
+Author-email: waylan.limberg@icloud.com
+Maintainer: Waylan Limberg
+Maintainer-email: waylan.limberg@icloud.com
+License: BSD License
+Download-URL: http://pypi.python.org/packages/source/M/Markdown/Markdown-3.2.2-py2.py3-none-any.whl
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: 3 :: Only
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Communications :: Email :: Filters
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content :: CGI Tools/Libraries
+Classifier: Topic :: Internet :: WWW/HTTP :: Site Management
+Classifier: Topic :: Software Development :: Documentation
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Filters
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Python: >=3.5
+Requires-Dist: importlib-metadata ; python_version < "3.8"
+Provides-Extra: testing
+Requires-Dist: coverage ; extra == 'testing'
+Requires-Dist: pyyaml ; extra == 'testing'
+
+
+This is a Python implementation of John Gruber's Markdown_.
+It is almost completely compliant with the reference implementation,
+though there are a few known issues. See Features_ for information
+on what exactly is supported and what is not. Additional features are
+supported by the `Available Extensions`_.
+
+.. _Markdown: https://daringfireball.net/projects/markdown/
+.. _Features: https://Python-Markdown.github.io#features
+.. _`Available Extensions`: https://Python-Markdown.github.io/extensions/
+
+Support
+=======
+
+You may report bugs, ask for help, and discuss various other issues on
+the `bug tracker`_.
+
+.. _`bug tracker`: https://github.com/Python-Markdown/markdown/issues
+
+

+ 74 - 0
Lib/site-packages/Markdown-3.2.2.dist-info/RECORD

@@ -0,0 +1,74 @@
+../../Scripts/markdown_py.exe,sha256=bpiHRmLSpMJEvVYm3ii5bmFeEOAFVDK_b88NTxGvNDU,106370
+Markdown-3.2.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+Markdown-3.2.2.dist-info/LICENSE.md,sha256=bxGTy2NHGOZcOlN9biXr1hSCDsDvaTz8EiSBEmONZNo,1645
+Markdown-3.2.2.dist-info/METADATA,sha256=zgAbjRRuBBmQaECWiKBHfwpPd1JjR8qQ3-39oaAHHaI,2409
+Markdown-3.2.2.dist-info/RECORD,,
+Markdown-3.2.2.dist-info/WHEEL,sha256=S8S5VL-stOTSZDYxHyf0KP7eds0J72qrK0Evu3TfyAY,92
+Markdown-3.2.2.dist-info/entry_points.txt,sha256=j4jiKg-iwZGImvi8OzotZePWoFbJJ4GrfzDqH03u3SQ,1103
+Markdown-3.2.2.dist-info/top_level.txt,sha256=IAxs8x618RXoH1uCqeLLxXsDefJvE_mIibr_M4sOlyk,9
+markdown/__init__.py,sha256=002-LuHviYzROW2rg_gBGai81nMouUNO9UFj5nSsTSk,2065
+markdown/__main__.py,sha256=MpVK3zlwQ-4AzDzZmIScPB90PpunMGVgS5KBmJuHYTw,5802
+markdown/__meta__.py,sha256=1KPSWlbs623C1QTn8qYSTfbDKrYQFyydYwlRep-5ATk,1630
+markdown/__pycache__/__init__.cpython-38.pyc,,
+markdown/__pycache__/__main__.cpython-38.pyc,,
+markdown/__pycache__/__meta__.cpython-38.pyc,,
+markdown/__pycache__/blockparser.cpython-38.pyc,,
+markdown/__pycache__/blockprocessors.cpython-38.pyc,,
+markdown/__pycache__/core.cpython-38.pyc,,
+markdown/__pycache__/inlinepatterns.cpython-38.pyc,,
+markdown/__pycache__/pep562.cpython-38.pyc,,
+markdown/__pycache__/postprocessors.cpython-38.pyc,,
+markdown/__pycache__/preprocessors.cpython-38.pyc,,
+markdown/__pycache__/serializers.cpython-38.pyc,,
+markdown/__pycache__/test_tools.cpython-38.pyc,,
+markdown/__pycache__/treeprocessors.cpython-38.pyc,,
+markdown/__pycache__/util.cpython-38.pyc,,
+markdown/blockparser.py,sha256=JpBhOokOoBUGCXolftOc5m1hPcR2y9s9hVd9WSuhHzo,4285
+markdown/blockprocessors.py,sha256=l4gmkAN9b2L340EX0gm24EyWS7UzBviPqX6wYrcgEco,23736
+markdown/core.py,sha256=Hm1VMHvmHJiVMta9y1vFyUx04OWjTP3yRJ-al4Rqz1U,15278
+markdown/extensions/__init__.py,sha256=nw2VtafIf5zHjAcUuykQbaNY6taOmNn7ARn11-Pe080,3661
+markdown/extensions/__pycache__/__init__.cpython-38.pyc,,
+markdown/extensions/__pycache__/abbr.cpython-38.pyc,,
+markdown/extensions/__pycache__/admonition.cpython-38.pyc,,
+markdown/extensions/__pycache__/attr_list.cpython-38.pyc,,
+markdown/extensions/__pycache__/codehilite.cpython-38.pyc,,
+markdown/extensions/__pycache__/def_list.cpython-38.pyc,,
+markdown/extensions/__pycache__/extra.cpython-38.pyc,,
+markdown/extensions/__pycache__/fenced_code.cpython-38.pyc,,
+markdown/extensions/__pycache__/footnotes.cpython-38.pyc,,
+markdown/extensions/__pycache__/legacy_attrs.cpython-38.pyc,,
+markdown/extensions/__pycache__/legacy_em.cpython-38.pyc,,
+markdown/extensions/__pycache__/md_in_html.cpython-38.pyc,,
+markdown/extensions/__pycache__/meta.cpython-38.pyc,,
+markdown/extensions/__pycache__/nl2br.cpython-38.pyc,,
+markdown/extensions/__pycache__/sane_lists.cpython-38.pyc,,
+markdown/extensions/__pycache__/smarty.cpython-38.pyc,,
+markdown/extensions/__pycache__/tables.cpython-38.pyc,,
+markdown/extensions/__pycache__/toc.cpython-38.pyc,,
+markdown/extensions/__pycache__/wikilinks.cpython-38.pyc,,
+markdown/extensions/abbr.py,sha256=pqp2HnOR2giT-iYKyqtsp2_eUOWBR0j_hUfjvUV5c88,2916
+markdown/extensions/admonition.py,sha256=HWHHjuYZPAPOg5X8hbpDuSbw8gB6k0odw8GuTT1v_N4,3124
+markdown/extensions/attr_list.py,sha256=m9a1H-S33rV2twtlFYuoxSiCAf22ndU5tziSzNF2dNg,6003
+markdown/extensions/codehilite.py,sha256=ChzzzrRIQZQwhQ7dLEIXDKTnemZzmEJCoNFcw6Sp5gk,9838
+markdown/extensions/def_list.py,sha256=iqRXAEl2XnyF415afCxihAgOmEUOK1hIuBPIK1k7Tzo,3521
+markdown/extensions/extra.py,sha256=udRN8OvSWcq3UwkPygvsFl1RlCVtCJ-ARVg2IwVH6VY,1831
+markdown/extensions/fenced_code.py,sha256=dww9rDu2kQtkoTpjn9BBgeGCTNdE1bMPJ2wgR6695iM,3897
+markdown/extensions/footnotes.py,sha256=a9sb8RoKqFU8p8ZhpTObrn_Uek0hbyPFVGYpRaEDXaw,15339
+markdown/extensions/legacy_attrs.py,sha256=2EaVQkxQoNnP8_lMPvGRBdNda8L4weUQroiyEuVdS-w,2547
+markdown/extensions/legacy_em.py,sha256=9ZMGCTrFh01eiOpnFjS0jVkqgYXiTzCGn-eNvYcvObg,1579
+markdown/extensions/md_in_html.py,sha256=ohSiGcgR5yBqusuTs0opbTO_5fq442fqPK-klFd_qaM,4040
+markdown/extensions/meta.py,sha256=EUfkzM7l7UpH__Or9K3pl8ldVddwndlCZWA3d712RAE,2331
+markdown/extensions/nl2br.py,sha256=wAqTNOuf2L1NzlEvEqoID70n9y-aiYaGLkuyQk3CD0w,783
+markdown/extensions/sane_lists.py,sha256=ZQmCf-247KBexVG0fc62nDvokGkV6W1uavYbieNKSG4,1505
+markdown/extensions/smarty.py,sha256=0padzkVCNACainKw-Xj1S5UfT0125VCTfNejmrCZItA,10238
+markdown/extensions/tables.py,sha256=bicFx_wqhnEx6Y_8MJqA56rh71pt5fOe94oiWbvcobY,7685
+markdown/extensions/toc.py,sha256=C45ZZxlp9uDTE1WW9EvLTKT24H2SY0HM3nUDfGlzgng,13477
+markdown/extensions/wikilinks.py,sha256=GkgT9BY7b1-qW--dIwFAhC9V20RoeF13b7CFdw_V21Q,2812
+markdown/inlinepatterns.py,sha256=EnYq9aU_Hi1gu5e8dcbUxUu0mRz-pHFV79uGQCYbD5I,29378
+markdown/pep562.py,sha256=5UkqT7sb-cQufgbOl_jF-RYUVVHS7VThzlMzR9vrd3I,8917
+markdown/postprocessors.py,sha256=25g6qqpJ4kuiq4RBrGz8RA6GMb7ArUi1AN2VDVnR35U,3738
+markdown/preprocessors.py,sha256=dsmMVPP2afKAZ0s59_mFidM_mCiNfgdBJ9aVDWu_viE,15323
+markdown/serializers.py,sha256=_wQl-iJrPSUEQ4Q1owWYqN9qceVh6TOlAOH_i44BKAQ,6540
+markdown/test_tools.py,sha256=qwF7x7QZa9Wa509Y4KJfmc1O3udrtXgH1AXj33TpDso,6823
+markdown/treeprocessors.py,sha256=NBaYc9TEGP7TBaN6YRROIqE5Lj-AMoAqp0jN-coGW3Q,15401
+markdown/util.py,sha256=3-F0KcHEYOIOayAt8FIeoUxNfLlfMeiSF3EE3dWzxg8,15499

+ 5 - 0
Lib/site-packages/Markdown-3.2.2.dist-info/WHEEL

@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.4)
+Root-Is-Purelib: true
+Tag: py3-none-any
+

+ 23 - 0
Lib/site-packages/Markdown-3.2.2.dist-info/entry_points.txt

@@ -0,0 +1,23 @@
+[console_scripts]
+markdown_py = markdown.__main__:run
+
+[markdown.extensions]
+abbr = markdown.extensions.abbr:AbbrExtension
+admonition = markdown.extensions.admonition:AdmonitionExtension
+attr_list = markdown.extensions.attr_list:AttrListExtension
+codehilite = markdown.extensions.codehilite:CodeHiliteExtension
+def_list = markdown.extensions.def_list:DefListExtension
+extra = markdown.extensions.extra:ExtraExtension
+fenced_code = markdown.extensions.fenced_code:FencedCodeExtension
+footnotes = markdown.extensions.footnotes:FootnoteExtension
+legacy_attrs = markdown.extensions.legacy_attrs:LegacyAttrExtension
+legacy_em = markdown.extensions.legacy_em:LegacyEmExtension
+md_in_html = markdown.extensions.md_in_html:MarkdownInHtmlExtension
+meta = markdown.extensions.meta:MetaExtension
+nl2br = markdown.extensions.nl2br:Nl2BrExtension
+sane_lists = markdown.extensions.sane_lists:SaneListExtension
+smarty = markdown.extensions.smarty:SmartyExtension
+tables = markdown.extensions.tables:TableExtension
+toc = markdown.extensions.toc:TocExtension
+wikilinks = markdown.extensions.wikilinks:WikiLinkExtension
+

+ 1 - 0
Lib/site-packages/Markdown-3.2.2.dist-info/top_level.txt

@@ -0,0 +1 @@
+markdown

+ 1 - 0
Lib/site-packages/MarkupSafe-1.1.1.dist-info/INSTALLER

@@ -0,0 +1 @@
+pip

+ 28 - 0
Lib/site-packages/MarkupSafe-1.1.1.dist-info/LICENSE.rst

@@ -0,0 +1,28 @@
+Copyright 2010 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1.  Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+2.  Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+3.  Neither the name of the copyright holder nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 105 - 0
Lib/site-packages/MarkupSafe-1.1.1.dist-info/METADATA

@@ -0,0 +1,105 @@
+Metadata-Version: 2.1
+Name: MarkupSafe
+Version: 1.1.1
+Summary: Safely add untrusted strings to HTML/XML markup.
+Home-page: https://palletsprojects.com/p/markupsafe/
+Author: Armin Ronacher
+Author-email: armin.ronacher@active-4.com
+Maintainer: The Pallets Team
+Maintainer-email: contact@palletsprojects.com
+License: BSD-3-Clause
+Project-URL: Documentation, https://markupsafe.palletsprojects.com/
+Project-URL: Code, https://github.com/pallets/markupsafe
+Project-URL: Issue tracker, https://github.com/pallets/markupsafe/issues
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Web Environment
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Topic :: Internet :: WWW/HTTP :: Dynamic Content
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup :: HTML
+Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*
+Description-Content-Type: text/x-rst
+
+MarkupSafe
+==========
+
+MarkupSafe implements a text object that escapes characters so it is
+safe to use in HTML and XML. Characters that have special meanings are
+replaced so that they display as the actual characters. This mitigates
+injection attacks, meaning untrusted user input can safely be displayed
+on a page.
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+    pip install -U MarkupSafe
+
+.. _pip: https://pip.pypa.io/en/stable/quickstart/
+
+
+Examples
+--------
+
+.. code-block:: pycon
+
+    >>> from markupsafe import Markup, escape
+    >>> # escape replaces special characters and wraps in Markup
+    >>> escape('<script>alert(document.cookie);</script>')
+    Markup(u'&lt;script&gt;alert(document.cookie);&lt;/script&gt;')
+    >>> # wrap in Markup to mark text "safe" and prevent escaping
+    >>> Markup('<strong>Hello</strong>')
+    Markup('<strong>hello</strong>')
+    >>> escape(Markup('<strong>Hello</strong>'))
+    Markup('<strong>hello</strong>')
+    >>> # Markup is a text subclass (str on Python 3, unicode on Python 2)
+    >>> # methods and operators escape their arguments
+    >>> template = Markup("Hello <em>%s</em>")
+    >>> template % '"World"'
+    Markup('Hello <em>&#34;World&#34;</em>')
+
+
+Donate
+------
+
+The Pallets organization develops and supports MarkupSafe and other
+libraries that use it. In order to grow the community of contributors
+and users, and allow the maintainers to devote more time to the
+projects, `please donate today`_.
+
+.. _please donate today: https://palletsprojects.com/donate
+
+
+Links
+-----
+
+*   Website: https://palletsprojects.com/p/markupsafe/
+*   Documentation: https://markupsafe.palletsprojects.com/
+*   License: `BSD-3-Clause <https://github.com/pallets/markupsafe/blob/master/LICENSE.rst>`_
+*   Releases: https://pypi.org/project/MarkupSafe/
+*   Code: https://github.com/pallets/markupsafe
+*   Issue tracker: https://github.com/pallets/markupsafe/issues
+*   Test status:
+
+    *   Linux, Mac: https://travis-ci.org/pallets/markupsafe
+    *   Windows: https://ci.appveyor.com/project/pallets/markupsafe
+
+*   Test coverage: https://codecov.io/gh/pallets/markupsafe
+*   Official chat: https://discord.gg/t6rrQZH
+
+

+ 15 - 0
Lib/site-packages/MarkupSafe-1.1.1.dist-info/RECORD

@@ -0,0 +1,15 @@
+MarkupSafe-1.1.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+MarkupSafe-1.1.1.dist-info/LICENSE.rst,sha256=RjHsDbX9kKVH4zaBcmTGeYIUM4FG-KyUtKV_lu6MnsQ,1503
+MarkupSafe-1.1.1.dist-info/METADATA,sha256=IFCP4hCNGjXJgMoSvdjPiKDLAMUTTWoxKXQsQvmyMNU,3653
+MarkupSafe-1.1.1.dist-info/RECORD,,
+MarkupSafe-1.1.1.dist-info/WHEEL,sha256=z-ezgbNu1Y2ixypRrBmFDHP9mmiBhiGywozViqVfAIc,105
+MarkupSafe-1.1.1.dist-info/top_level.txt,sha256=qy0Plje5IJuvsCBjejJyhDCjEAdcDLK_2agVcex8Z6U,11
+markupsafe/__init__.py,sha256=UAy1UKlykemnSZWIVn8RDqY0wvjV6lkeRwYOMNhw4bA,10453
+markupsafe/__pycache__/__init__.cpython-38.pyc,,
+markupsafe/__pycache__/_compat.cpython-38.pyc,,
+markupsafe/__pycache__/_constants.cpython-38.pyc,,
+markupsafe/__pycache__/_native.cpython-38.pyc,,
+markupsafe/_compat.py,sha256=XweNhJEcyTP_wIBUaIO6nxzIb6XFwweriXyZfiTpkdw,591
+markupsafe/_constants.py,sha256=IXLUQkLM6CTustG5vEQTEy6pBB3z5pm84NkYU1aW9qI,4954
+markupsafe/_native.py,sha256=LwsYk-GHoPsPboRD_tNC6_jTmCj3MLtsnDFis7HjE50,1942
+markupsafe/_speedups.cp38-win_amd64.pyd,sha256=5dfKRjDzuEw9S4XjBgpxYSnBLLuHGj7pmC0-u3p_Qc4,14848

+ 5 - 0
Lib/site-packages/MarkupSafe-1.1.1.dist-info/WHEEL

@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.6)
+Root-Is-Purelib: false
+Tag: cp38-cp38-win_amd64
+

+ 1 - 0
Lib/site-packages/MarkupSafe-1.1.1.dist-info/top_level.txt

@@ -0,0 +1 @@
+markupsafe

+ 1 - 0
Lib/site-packages/PyYAML-5.3.1.dist-info/INSTALLER

@@ -0,0 +1 @@
+pip

+ 20 - 0
Lib/site-packages/PyYAML-5.3.1.dist-info/LICENSE

@@ -0,0 +1,20 @@
+Copyright (c) 2017-2020 Ingy döt Net
+Copyright (c) 2006-2016 Kirill Simonov
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
+of the Software, and to permit persons to whom the Software is furnished to do
+so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.

+ 40 - 0
Lib/site-packages/PyYAML-5.3.1.dist-info/METADATA

@@ -0,0 +1,40 @@
+Metadata-Version: 2.1
+Name: PyYAML
+Version: 5.3.1
+Summary: YAML parser and emitter for Python
+Home-page: https://github.com/yaml/pyyaml
+Author: Kirill Simonov
+Author-email: xi@resolvent.net
+License: MIT
+Download-URL: https://pypi.org/project/PyYAML/
+Platform: Any
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Cython
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Topic :: Software Development :: Libraries :: Python Modules
+Classifier: Topic :: Text Processing :: Markup
+
+YAML is a data serialization format designed for human readability
+and interaction with scripting languages.  PyYAML is a YAML parser
+and emitter for Python.
+
+PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
+support, capable extension API, and sensible error messages.  PyYAML
+supports standard YAML tags and provides Python-specific tags that
+allow to represent an arbitrary Python object.
+
+PyYAML is applicable for a broad range of tasks from complex
+configuration files to object serialization and persistence.
+

+ 40 - 0
Lib/site-packages/PyYAML-5.3.1.dist-info/RECORD

@@ -0,0 +1,40 @@
+PyYAML-5.3.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+PyYAML-5.3.1.dist-info/LICENSE,sha256=xAESRJ8lS5dTBFklJIMT6ScO-jbSJrItgtTMbEPFfyk,1101
+PyYAML-5.3.1.dist-info/METADATA,sha256=JMHGIwZThSlNcmGxXTVZ-NFNK3KiEHBdfryJFfPG7Yk,1690
+PyYAML-5.3.1.dist-info/RECORD,,
+PyYAML-5.3.1.dist-info/WHEEL,sha256=-ODc2a2AO_YJ5T46NOquHfWjRM7bQvlt-f3zRaLBjL4,105
+_yaml.cp38-win_amd64.pyd,sha256=4GsUf9UXEtLFhk52pQCdWR15hJ4STDjUQth8KLldsQ8,284672
+yaml/__init__.py,sha256=XFUNbKTg4afAd0BETjGQ1mKQ97_g5jbE1C0WoKc74dc,13170
+yaml/__pycache__/__init__.cpython-38.pyc,sha256=VIr8pQ7J6C7GTdteeqhWWDKmp6o5b7KpXk9X9HEhXqM,11842
+yaml/__pycache__/composer.cpython-38.pyc,sha256=1QlHfIiSaExiYXxyPcOWx_K1v3OjqyW6QJQGeA5loDg,3560
+yaml/__pycache__/constructor.cpython-38.pyc,sha256=yj61plpCRp5H70DJ_Cakt-Avp_-0Wd0e3bRwLiNn5nQ,20819
+yaml/__pycache__/cyaml.cpython-38.pyc,sha256=VB_nI5aH0e7dPslaQTAx4m3BtdPpnmJw_yplqglilq8,3403
+yaml/__pycache__/dumper.cpython-38.pyc,sha256=6Hz-MxVnKWNrL9AF5bhrazSzRsLASWdbKYcVOXM7Qvw,1820
+yaml/__pycache__/emitter.cpython-38.pyc,sha256=UmRyfsvoEeb3aWG9DH5aY5Lrfmp7j3MZACiM90X-T7o,25350
+yaml/__pycache__/error.cpython-38.pyc,sha256=raWrNypX-e6i4s-SGToFOLg1mLSPs5c4ckc5iQXk20g,2297
+yaml/__pycache__/events.cpython-38.pyc,sha256=HBhv35Azy6dUDZSqCDPIsTzoyo-1y1kV3-z8M5AX-0I,3971
+yaml/__pycache__/loader.cpython-38.pyc,sha256=sQo_AFAhK20zN8w0s_dPtX88kg3M_a3C0bm8oZv4eIc,2161
+yaml/__pycache__/nodes.cpython-38.pyc,sha256=qGsbHemePSdhFJszDBi1uqjTtsO5F0c42Do-8EnEUeU,1722
+yaml/__pycache__/parser.cpython-38.pyc,sha256=c-zTpzjoNNwLnj-IiFxuy5F3P_Jo_0Tpm3hvAm2Jwno,11921
+yaml/__pycache__/reader.cpython-38.pyc,sha256=IvAiKEPwRjeoQjywtMZMcvNikFEdsbl1WkfqmJ5Orxc,4534
+yaml/__pycache__/representer.cpython-38.pyc,sha256=iArfEC3B4wSRK0Y04gr9l66nOdrRIcK1kK70YJnoSKk,10066
+yaml/__pycache__/resolver.cpython-38.pyc,sha256=tkBjJOtUkhyvchWgouur-mdhwi2uk9XkTak-f1ws9DY,5475
+yaml/__pycache__/scanner.cpython-38.pyc,sha256=2fxtpWWuiB3DGZajd8KsZaJ6Hugt5wtAdJbsY56nhYo,25266
+yaml/__pycache__/serializer.cpython-38.pyc,sha256=DUQCfMIgwmGaHM0nYsTk_R7qSBFOOMfnxbf0TZr_Vlc,3317
+yaml/__pycache__/tokens.cpython-38.pyc,sha256=hLLE8--b5O6JkJEOGPMxCyJDyz4IraBhAJapxxTvvPI,4932
+yaml/composer.py,sha256=_Ko30Wr6eDWUeUpauUGT3Lcg9QPBnOPVlTnIMRGJ9FM,4883
+yaml/constructor.py,sha256=O3Uaf0_J_5GQBoeI9ZNhpJAhtdagr_X2HzDgGbZOMnw,28627
+yaml/cyaml.py,sha256=LiMkvchNonfoy1F6ec9L2BiUz3r0bwF4hympASJX1Ic,3846
+yaml/dumper.py,sha256=PLctZlYwZLp7XmeUdwRuv4nYOZ2UBnDIUy8-lKfLF-o,2837
+yaml/emitter.py,sha256=jghtaU7eFwg31bG0B7RZea_29Adi9CKmXq_QjgQpCkQ,43006
+yaml/error.py,sha256=Ah9z-toHJUbE9j-M8YpxgSRM5CgLCcwVzJgLLRF2Fxo,2533
+yaml/events.py,sha256=50_TksgQiE4up-lKo_V-nBy-tAIxkIPQxY5qDhKCeHw,2445
+yaml/loader.py,sha256=UVa-zIqmkFSCIYq_PgSGm4NSJttHY2Rf_zQ4_b1fHN0,2061
+yaml/nodes.py,sha256=gPKNj8pKCdh2d4gr3gIYINnPOaOxGhJAUiYhGRnPE84,1440
+yaml/parser.py,sha256=ilWp5vvgoHFGzvOZDItFoGjD6D42nhlZrZyjAwa0oJo,25495
+yaml/reader.py,sha256=0dmzirOiDG4Xo41RnuQS7K9rkY3xjHiVasfDMNTqCNw,6794
+yaml/representer.py,sha256=82UM3ZxUQKqsKAF4ltWOxCS6jGPIFtXpGs7mvqyv4Xs,14184
+yaml/resolver.py,sha256=DJCjpQr8YQCEYYjKEYqTl0GrsZil2H4aFOI9b0Oe-U4,8970
+yaml/scanner.py,sha256=KeQIKGNlSyPE8QDwionHxy9CgbqE5teJEz05FR9-nAg,51277
+yaml/serializer.py,sha256=ChuFgmhU01hj4xgI8GaKv6vfM2Bujwa9i7d2FAHj7cA,4165
+yaml/tokens.py,sha256=lTQIzSVw8Mg9wv459-TjiOQe6wVziqaRlqX2_89rp54,2573

+ 5 - 0
Lib/site-packages/PyYAML-5.3.1.dist-info/WHEEL

@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.34.2)
+Root-Is-Purelib: false
+Tag: cp38-cp38-win_amd64
+

+ 231 - 0
Lib/site-packages/Pygments-2.6.1.dist-info/AUTHORS

@@ -0,0 +1,231 @@
+Pygments is written and maintained by Georg Brandl <georg@python.org>.
+
+Major developers are Tim Hatch <tim@timhatch.com> and Armin Ronacher
+<armin.ronacher@active-4.com>.
+
+Other contributors, listed alphabetically, are:
+
+* Sam Aaron -- Ioke lexer
+* Ali Afshar -- image formatter
+* Thomas Aglassinger -- Easytrieve, JCL, Rexx, Transact-SQL and VBScript
+  lexers
+* Muthiah Annamalai -- Ezhil lexer
+* Kumar Appaiah -- Debian control lexer
+* Andreas Amann -- AppleScript lexer
+* Timothy Armstrong -- Dart lexer fixes
+* Jeffrey Arnold -- R/S, Rd, BUGS, Jags, and Stan lexers
+* Jeremy Ashkenas -- CoffeeScript lexer
+* José Joaquín Atria -- Praat lexer
+* Stefan Matthias Aust -- Smalltalk lexer
+* Lucas Bajolet -- Nit lexer
+* Ben Bangert -- Mako lexers
+* Max Battcher -- Darcs patch lexer
+* Thomas Baruchel -- APL lexer
+* Tim Baumann -- (Literate) Agda lexer
+* Paul Baumgart, 280 North, Inc. -- Objective-J lexer
+* Michael Bayer -- Myghty lexers
+* Thomas Beale -- Archetype lexers
+* John Benediktsson -- Factor lexer
+* Trevor Bergeron -- mIRC formatter
+* Vincent Bernat -- LessCSS lexer
+* Christopher Bertels -- Fancy lexer
+* Sébastien Bigaret -- QVT Operational lexer
+* Jarrett Billingsley -- MiniD lexer
+* Adam Blinkinsop -- Haskell, Redcode lexers
+* Stéphane Blondon -- SGF and Sieve lexers
+* Frits van Bommel -- assembler lexers
+* Pierre Bourdon -- bugfixes
+* Martijn Braam -- Kernel log lexer
+* Matthias Bussonnier -- ANSI style handling for terminal-256 formatter
+* chebee7i -- Python traceback lexer improvements
+* Hiram Chirino -- Scaml and Jade lexers
+* Mauricio Caceres -- SAS and Stata lexers.
+* Ian Cooper -- VGL lexer
+* David Corbett -- Inform, Jasmin, JSGF, Snowball, and TADS 3 lexers
+* Leaf Corcoran -- MoonScript lexer
+* Christopher Creutzig -- MuPAD lexer
+* Daniël W. Crompton -- Pike lexer
+* Pete Curry -- bugfixes
+* Bryan Davis -- EBNF lexer
+* Bruno Deferrari -- Shen lexer
+* Giedrius Dubinskas -- HTML formatter improvements
+* Owen Durni -- Haxe lexer
+* Alexander Dutton, Oxford University Computing Services -- SPARQL lexer
+* James Edwards -- Terraform lexer
+* Nick Efford -- Python 3 lexer
+* Sven Efftinge -- Xtend lexer
+* Artem Egorkine -- terminal256 formatter
+* Matthew Fernandez -- CAmkES lexer
+* Michael Ficarra -- CPSA lexer
+* James H. Fisher -- PostScript lexer
+* William S. Fulton -- SWIG lexer
+* Carlos Galdino -- Elixir and Elixir Console lexers
+* Michael Galloy -- IDL lexer
+* Naveen Garg -- Autohotkey lexer
+* Simon Garnotel -- FreeFem++ lexer
+* Laurent Gautier -- R/S lexer
+* Alex Gaynor -- PyPy log lexer
+* Richard Gerkin -- Igor Pro lexer
+* Alain Gilbert -- TypeScript lexer
+* Alex Gilding -- BlitzBasic lexer
+* GitHub, Inc -- DASM16, Augeas, TOML, and Slash lexers
+* Bertrand Goetzmann -- Groovy lexer
+* Krzysiek Goj -- Scala lexer
+* Rostyslav Golda -- FloScript lexer
+* Andrey Golovizin -- BibTeX lexers
+* Matt Good -- Genshi, Cheetah lexers
+* Michał Górny -- vim modeline support
+* Alex Gosse -- TrafficScript lexer
+* Patrick Gotthardt -- PHP namespaces support
+* Olivier Guibe -- Asymptote lexer
+* Phil Hagelberg -- Fennel lexer
+* Florian Hahn -- Boogie lexer
+* Martin Harriman -- SNOBOL lexer
+* Matthew Harrison -- SVG formatter
+* Steven Hazel -- Tcl lexer
+* Dan Michael Heggø -- Turtle lexer
+* Aslak Hellesøy -- Gherkin lexer
+* Greg Hendershott -- Racket lexer
+* Justin Hendrick -- ParaSail lexer
+* Jordi Gutiérrez Hermoso -- Octave lexer
+* David Hess, Fish Software, Inc. -- Objective-J lexer
+* Varun Hiremath -- Debian control lexer
+* Rob Hoelz -- Perl 6 lexer
+* Doug Hogan -- Mscgen lexer
+* Ben Hollis -- Mason lexer
+* Max Horn -- GAP lexer
+* Alastair Houghton -- Lexer inheritance facility
+* Tim Howard -- BlitzMax lexer
+* Dustin Howett -- Logos lexer
+* Ivan Inozemtsev -- Fantom lexer
+* Hiroaki Itoh -- Shell console rewrite, Lexers for PowerShell session,
+  MSDOS session, BC, WDiff
+* Brian R. Jackson -- Tea lexer
+* Christian Jann -- ShellSession lexer
+* Dennis Kaarsemaker -- sources.list lexer
+* Dmitri Kabak -- Inferno Limbo lexer
+* Igor Kalnitsky -- vhdl lexer
+* Colin Kennedy - USD lexer
+* Alexander Kit -- MaskJS lexer
+* Pekka Klärck -- Robot Framework lexer
+* Gerwin Klein -- Isabelle lexer
+* Eric Knibbe -- Lasso lexer
+* Stepan Koltsov -- Clay lexer
+* Adam Koprowski -- Opa lexer
+* Benjamin Kowarsch -- Modula-2 lexer
+* Domen Kožar -- Nix lexer
+* Oleh Krekel -- Emacs Lisp lexer
+* Alexander Kriegisch -- Kconfig and AspectJ lexers
+* Marek Kubica -- Scheme lexer
+* Jochen Kupperschmidt -- Markdown processor
+* Gerd Kurzbach -- Modelica lexer
+* Jon Larimer, Google Inc. -- Smali lexer
+* Olov Lassus -- Dart lexer
+* Matt Layman -- TAP lexer
+* Kristian Lyngstøl -- Varnish lexers
+* Sylvestre Ledru -- Scilab lexer
+* Chee Sing Lee -- Flatline lexer
+* Mark Lee -- Vala lexer
+* Valentin Lorentz -- C++ lexer improvements
+* Ben Mabey -- Gherkin lexer
+* Angus MacArthur -- QML lexer
+* Louis Mandel -- X10 lexer
+* Louis Marchand -- Eiffel lexer
+* Simone Margaritelli -- Hybris lexer
+* Kirk McDonald -- D lexer
+* Gordon McGregor -- SystemVerilog lexer
+* Stephen McKamey -- Duel/JBST lexer
+* Brian McKenna -- F# lexer
+* Charles McLaughlin -- Puppet lexer
+* Kurt McKee -- Tera Term macro lexer
+* Lukas Meuser -- BBCode formatter, Lua lexer
+* Cat Miller -- Pig lexer
+* Paul Miller -- LiveScript lexer
+* Hong Minhee -- HTTP lexer
+* Michael Mior -- Awk lexer
+* Bruce Mitchener -- Dylan lexer rewrite
+* Reuben Morais -- SourcePawn lexer
+* Jon Morton -- Rust lexer
+* Paulo Moura -- Logtalk lexer
+* Mher Movsisyan -- DTD lexer
+* Dejan Muhamedagic -- Crmsh lexer
+* Ana Nelson -- Ragel, ANTLR, R console lexers
+* Kurt Neufeld -- Markdown lexer
+* Nam T. Nguyen -- Monokai style
+* Jesper Noehr -- HTML formatter "anchorlinenos"
+* Mike Nolta -- Julia lexer
+* Jonas Obrist -- BBCode lexer
+* Edward O'Callaghan -- Cryptol lexer
+* David Oliva -- Rebol lexer
+* Pat Pannuto -- nesC lexer
+* Jon Parise -- Protocol buffers and Thrift lexers
+* Benjamin Peterson -- Test suite refactoring
+* Ronny Pfannschmidt -- BBCode lexer
+* Dominik Picheta -- Nimrod lexer
+* Andrew Pinkham -- RTF Formatter Refactoring
+* Clément Prévost -- UrbiScript lexer
+* Tanner Prynn -- cmdline -x option and loading lexers from files
+* Oleh Prypin -- Crystal lexer (based on Ruby lexer)
+* Xidorn Quan -- Web IDL lexer
+* Elias Rabel -- Fortran fixed form lexer
+* raichoo -- Idris lexer
+* Kashif Rasul -- CUDA lexer
+* Nathan Reed -- HLSL lexer
+* Justin Reidy -- MXML lexer
+* Norman Richards -- JSON lexer
+* Corey Richardson -- Rust lexer updates
+* Lubomir Rintel -- GoodData MAQL and CL lexers
+* Andre Roberge -- Tango style
+* Georg Rollinger -- HSAIL lexer
+* Michiel Roos -- TypoScript lexer
+* Konrad Rudolph -- LaTeX formatter enhancements
+* Mario Ruggier -- Evoque lexers
+* Miikka Salminen -- Lovelace style, Hexdump lexer, lexer enhancements
+* Stou Sandalski -- NumPy, FORTRAN, tcsh and XSLT lexers
+* Matteo Sasso -- Common Lisp lexer
+* Joe Schafer -- Ada lexer
+* Ken Schutte -- Matlab lexers
+* René Schwaiger -- Rainbow Dash style
+* Sebastian Schweizer -- Whiley lexer
+* Tassilo Schweyer -- Io, MOOCode lexers
+* Ted Shaw -- AutoIt lexer
+* Joerg Sieker -- ABAP lexer
+* Robert Simmons -- Standard ML lexer
+* Kirill Simonov -- YAML lexer
+* Corbin Simpson -- Monte lexer
+* Alexander Smishlajev -- Visual FoxPro lexer
+* Steve Spigarelli -- XQuery lexer
+* Jerome St-Louis -- eC lexer
+* Camil Staps -- Clean and NuSMV lexers; Solarized style
+* James Strachan -- Kotlin lexer
+* Tom Stuart -- Treetop lexer
+* Colin Sullivan -- SuperCollider lexer
+* Ben Swift -- Extempore lexer
+* Edoardo Tenani -- Arduino lexer
+* Tiberius Teng -- default style overhaul
+* Jeremy Thurgood -- Erlang, Squid config lexers
+* Brian Tiffin -- OpenCOBOL lexer
+* Bob Tolbert -- Hy lexer
+* Matthias Trute -- Forth lexer
+* Erick Tryzelaar -- Felix lexer
+* Alexander Udalov -- Kotlin lexer improvements
+* Thomas Van Doren -- Chapel lexer
+* Daniele Varrazzo -- PostgreSQL lexers
+* Abe Voelker -- OpenEdge ABL lexer
+* Pepijn de Vos -- HTML formatter CTags support
+* Matthias Vallentin -- Bro lexer
+* Benoît Vinot -- AMPL lexer
+* Linh Vu Hong -- RSL lexer
+* Nathan Weizenbaum -- Haml and Sass lexers
+* Nathan Whetsell -- Csound lexers
+* Dietmar Winkler -- Modelica lexer
+* Nils Winter -- Smalltalk lexer
+* Davy Wybiral -- Clojure lexer
+* Whitney Young -- ObjectiveC lexer
+* Diego Zamboni -- CFengine3 lexer
+* Enrique Zamudio -- Ceylon lexer
+* Alex Zimin -- Nemerle lexer
+* Rob Zimmerman -- Kal lexer
+* Vincent Zurczak -- Roboconf lexer
+
+Many thanks for all contributions!

+ 1 - 0
Lib/site-packages/Pygments-2.6.1.dist-info/INSTALLER

@@ -0,0 +1 @@
+pip

+ 25 - 0
Lib/site-packages/Pygments-2.6.1.dist-info/LICENSE

@@ -0,0 +1,25 @@
+Copyright (c) 2006-2019 by the respective authors (see AUTHORS file).
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+* Redistributions of source code must retain the above copyright
+  notice, this list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright
+  notice, this list of conditions and the following disclaimer in the
+  documentation and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 48 - 0
Lib/site-packages/Pygments-2.6.1.dist-info/METADATA

@@ -0,0 +1,48 @@
+Metadata-Version: 2.1
+Name: Pygments
+Version: 2.6.1
+Summary: Pygments is a syntax highlighting package written in Python.
+Home-page: https://pygments.org/
+Author: Georg Brandl
+Author-email: georg@python.org
+License: BSD License
+Keywords: syntax highlighting
+Platform: any
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: End Users/Desktop
+Classifier: Intended Audience :: System Administrators
+Classifier: Development Status :: 6 - Mature
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: Programming Language :: Python :: 3.8
+Classifier: Programming Language :: Python :: Implementation :: CPython
+Classifier: Programming Language :: Python :: Implementation :: PyPy
+Classifier: Operating System :: OS Independent
+Classifier: Topic :: Text Processing :: Filters
+Classifier: Topic :: Utilities
+Requires-Python: >=3.5
+
+
+Pygments
+~~~~~~~~
+
+Pygments is a syntax highlighting package written in Python.
+
+It is a generic syntax highlighter suitable for use in code hosting, forums,
+wikis or other applications that need to prettify source code.  Highlights
+are:
+
+* a wide range of over 500 languages and other text formats is supported
+* special attention is paid to details, increasing quality by a fair amount
+* support for new languages and formats are added easily
+* a number of output formats, presently HTML, LaTeX, RTF, SVG, all image     formats that PIL supports and ANSI sequences
+* it is usable as a command-line tool and as a library
+
+:copyright: Copyright 2006-2019 by the Pygments team, see AUTHORS.
+:license: BSD, see LICENSE for details.
+
+

+ 463 - 0
Lib/site-packages/Pygments-2.6.1.dist-info/RECORD

@@ -0,0 +1,463 @@
+../../Scripts/pygmentize.exe,sha256=o1zKlHseriZKeLDM37p8PZP1f725l80RvvdA6ZFlDUc,106371
+Pygments-2.6.1.dist-info/AUTHORS,sha256=PVpa2_Oku6BGuiUvutvuPnWGpzxqFy2I8-NIrqCvqUY,8449
+Pygments-2.6.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+Pygments-2.6.1.dist-info/LICENSE,sha256=RbiNNEnDeAZZR1i_PEhNnZixKx7MFj9lQx_gf-pgJfA,1331
+Pygments-2.6.1.dist-info/METADATA,sha256=nrRK1IyAspMOw3-uN_SPDbJo1XIALm7ZbRV9aui-FtM,1833
+Pygments-2.6.1.dist-info/RECORD,,
+Pygments-2.6.1.dist-info/WHEEL,sha256=p46_5Uhzqz6AzeSosiOnxK-zmFja1i22CrQCjmYe8ec,92
+Pygments-2.6.1.dist-info/entry_points.txt,sha256=NXt9BRDRv6tAfDwqKM0bDHrrxaIt2f1nxH9CwjyjSKc,54
+Pygments-2.6.1.dist-info/top_level.txt,sha256=RjKKqrVIStoebLHdbs0yZ2Lk4rS7cxGguXsLCYvZ2Ak,9
+pygments/__init__.py,sha256=Hmd0jgKuzYTHVCXGJrUr5E7R0tAINEf8WuhKUmC7ITY,3036
+pygments/__main__.py,sha256=JV6RSKzbYgMQHLf0nZGzfq1IXxns2iGunsfkY3jxFKo,372
+pygments/__pycache__/__init__.cpython-38.pyc,,
+pygments/__pycache__/__main__.cpython-38.pyc,,
+pygments/__pycache__/cmdline.cpython-38.pyc,,
+pygments/__pycache__/console.cpython-38.pyc,,
+pygments/__pycache__/filter.cpython-38.pyc,,
+pygments/__pycache__/formatter.cpython-38.pyc,,
+pygments/__pycache__/lexer.cpython-38.pyc,,
+pygments/__pycache__/modeline.cpython-38.pyc,,
+pygments/__pycache__/plugin.cpython-38.pyc,,
+pygments/__pycache__/regexopt.cpython-38.pyc,,
+pygments/__pycache__/scanner.cpython-38.pyc,,
+pygments/__pycache__/sphinxext.cpython-38.pyc,,
+pygments/__pycache__/style.cpython-38.pyc,,
+pygments/__pycache__/token.cpython-38.pyc,,
+pygments/__pycache__/unistring.cpython-38.pyc,,
+pygments/__pycache__/util.cpython-38.pyc,,
+pygments/cmdline.py,sha256=-mJqcK1Cic8Z-z-ITdj0yjN9exJdPew8m9BwHvtesJY,19479
+pygments/console.py,sha256=QF0bQHbGeFRSetc3g5JsmGziVHQqIZCprEwNlZFtiRg,1721
+pygments/filter.py,sha256=hu4Qo6zdyMcIprEL3xmZGb-inVe1_vUKvgY9vdAV5JU,2030
+pygments/filters/__init__.py,sha256=L_K0aapWqkqDPBkMVGoXvp17zsv7ddl0bNQdMsK43tg,11534
+pygments/filters/__pycache__/__init__.cpython-38.pyc,,
+pygments/formatter.py,sha256=Zyz1t_dRczxwuuQkgkwOIOd2TRZpHMbjVHOL_ch37JQ,2917
+pygments/formatters/__init__.py,sha256=d8AnTX9J39ZKoh2YJIiFcOk79h28T0cJ7Yn6rs4G3UI,5107
+pygments/formatters/__pycache__/__init__.cpython-38.pyc,,
+pygments/formatters/__pycache__/_mapping.cpython-38.pyc,,
+pygments/formatters/__pycache__/bbcode.cpython-38.pyc,,
+pygments/formatters/__pycache__/html.cpython-38.pyc,,
+pygments/formatters/__pycache__/img.cpython-38.pyc,,
+pygments/formatters/__pycache__/irc.cpython-38.pyc,,
+pygments/formatters/__pycache__/latex.cpython-38.pyc,,
+pygments/formatters/__pycache__/other.cpython-38.pyc,,
+pygments/formatters/__pycache__/rtf.cpython-38.pyc,,
+pygments/formatters/__pycache__/svg.cpython-38.pyc,,
+pygments/formatters/__pycache__/terminal.cpython-38.pyc,,
+pygments/formatters/__pycache__/terminal256.cpython-38.pyc,,
+pygments/formatters/_mapping.py,sha256=QvLAVzGeldQ6iE8xeGOYtclUBMV0KclGiINcsCaK538,6175
+pygments/formatters/bbcode.py,sha256=_K7UzwyT70snOYAiT3UkItbXRwQYVuTHpr1AZtRHL6Y,3314
+pygments/formatters/html.py,sha256=Eoa4EJxTmE3g0kgMiNN7Ihh5A9lNWUnBlZ1eXFC-yl4,32625
+pygments/formatters/img.py,sha256=iajPfAvg5cB79wS0Mu3pv5Vy2TgghWQcl1OWIz1NcKg,20701
+pygments/formatters/irc.py,sha256=nU9jSjARuRaZpCuCey7bnRwGTGKeCTEhm_yDDYxzKQ8,5869
+pygments/formatters/latex.py,sha256=IOqv1C-LyWs6v2cgecfZ-CkfNNF6VQqcNkPUs3aHUjU,17711
+pygments/formatters/other.py,sha256=Qfc5OixOxM7YEy0d0NJBT750ukj-uPyhxKtHGTm0Vlc,5140
+pygments/formatters/rtf.py,sha256=z8LTTuEXwx3hpLaG0qeJumZCkUfseLIBsxhZE-0tEKg,5050
+pygments/formatters/svg.py,sha256=QUPMQIhXN4JA6auaUTj6z6JaeffyF7OpVoQ8IENptCo,7279
+pygments/formatters/terminal.py,sha256=q0QuanTWnUr4fuNuxnSnjLwjlyUJSMXqBK58MZCAk8Q,4662
+pygments/formatters/terminal256.py,sha256=x9n-YSOwDZhOaLGmYKLO259ZNBCqSydm_KxZJh2Q-Eg,11126
+pygments/lexer.py,sha256=FBy1KBXYiwf1TYtXN25OSrnLGbm9oAWXCsq6ReqtvNA,31559
+pygments/lexers/__init__.py,sha256=3dDjsioYkLz3fRbX3gV9xoi-SkpRRCtYurrWrylAZCo,11310
+pygments/lexers/__pycache__/__init__.cpython-38.pyc,,
+pygments/lexers/__pycache__/_asy_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_cl_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_cocoa_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_csound_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_lasso_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_lua_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_mapping.cpython-38.pyc,,
+pygments/lexers/__pycache__/_mql_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_openedge_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_php_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_postgres_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_scilab_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_sourcemod_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_stan_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_stata_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_tsql_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_usd_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_vbscript_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/_vim_builtins.cpython-38.pyc,,
+pygments/lexers/__pycache__/actionscript.cpython-38.pyc,,
+pygments/lexers/__pycache__/agile.cpython-38.pyc,,
+pygments/lexers/__pycache__/algebra.cpython-38.pyc,,
+pygments/lexers/__pycache__/ambient.cpython-38.pyc,,
+pygments/lexers/__pycache__/ampl.cpython-38.pyc,,
+pygments/lexers/__pycache__/apl.cpython-38.pyc,,
+pygments/lexers/__pycache__/archetype.cpython-38.pyc,,
+pygments/lexers/__pycache__/asm.cpython-38.pyc,,
+pygments/lexers/__pycache__/automation.cpython-38.pyc,,
+pygments/lexers/__pycache__/basic.cpython-38.pyc,,
+pygments/lexers/__pycache__/bibtex.cpython-38.pyc,,
+pygments/lexers/__pycache__/boa.cpython-38.pyc,,
+pygments/lexers/__pycache__/business.cpython-38.pyc,,
+pygments/lexers/__pycache__/c_cpp.cpython-38.pyc,,
+pygments/lexers/__pycache__/c_like.cpython-38.pyc,,
+pygments/lexers/__pycache__/capnproto.cpython-38.pyc,,
+pygments/lexers/__pycache__/chapel.cpython-38.pyc,,
+pygments/lexers/__pycache__/clean.cpython-38.pyc,,
+pygments/lexers/__pycache__/compiled.cpython-38.pyc,,
+pygments/lexers/__pycache__/configs.cpython-38.pyc,,
+pygments/lexers/__pycache__/console.cpython-38.pyc,,
+pygments/lexers/__pycache__/crystal.cpython-38.pyc,,
+pygments/lexers/__pycache__/csound.cpython-38.pyc,,
+pygments/lexers/__pycache__/css.cpython-38.pyc,,
+pygments/lexers/__pycache__/d.cpython-38.pyc,,
+pygments/lexers/__pycache__/dalvik.cpython-38.pyc,,
+pygments/lexers/__pycache__/data.cpython-38.pyc,,
+pygments/lexers/__pycache__/diff.cpython-38.pyc,,
+pygments/lexers/__pycache__/dotnet.cpython-38.pyc,,
+pygments/lexers/__pycache__/dsls.cpython-38.pyc,,
+pygments/lexers/__pycache__/dylan.cpython-38.pyc,,
+pygments/lexers/__pycache__/ecl.cpython-38.pyc,,
+pygments/lexers/__pycache__/eiffel.cpython-38.pyc,,
+pygments/lexers/__pycache__/elm.cpython-38.pyc,,
+pygments/lexers/__pycache__/email.cpython-38.pyc,,
+pygments/lexers/__pycache__/erlang.cpython-38.pyc,,
+pygments/lexers/__pycache__/esoteric.cpython-38.pyc,,
+pygments/lexers/__pycache__/ezhil.cpython-38.pyc,,
+pygments/lexers/__pycache__/factor.cpython-38.pyc,,
+pygments/lexers/__pycache__/fantom.cpython-38.pyc,,
+pygments/lexers/__pycache__/felix.cpython-38.pyc,,
+pygments/lexers/__pycache__/floscript.cpython-38.pyc,,
+pygments/lexers/__pycache__/forth.cpython-38.pyc,,
+pygments/lexers/__pycache__/fortran.cpython-38.pyc,,
+pygments/lexers/__pycache__/foxpro.cpython-38.pyc,,
+pygments/lexers/__pycache__/freefem.cpython-38.pyc,,
+pygments/lexers/__pycache__/functional.cpython-38.pyc,,
+pygments/lexers/__pycache__/go.cpython-38.pyc,,
+pygments/lexers/__pycache__/grammar_notation.cpython-38.pyc,,
+pygments/lexers/__pycache__/graph.cpython-38.pyc,,
+pygments/lexers/__pycache__/graphics.cpython-38.pyc,,
+pygments/lexers/__pycache__/haskell.cpython-38.pyc,,
+pygments/lexers/__pycache__/haxe.cpython-38.pyc,,
+pygments/lexers/__pycache__/hdl.cpython-38.pyc,,
+pygments/lexers/__pycache__/hexdump.cpython-38.pyc,,
+pygments/lexers/__pycache__/html.cpython-38.pyc,,
+pygments/lexers/__pycache__/idl.cpython-38.pyc,,
+pygments/lexers/__pycache__/igor.cpython-38.pyc,,
+pygments/lexers/__pycache__/inferno.cpython-38.pyc,,
+pygments/lexers/__pycache__/installers.cpython-38.pyc,,
+pygments/lexers/__pycache__/int_fiction.cpython-38.pyc,,
+pygments/lexers/__pycache__/iolang.cpython-38.pyc,,
+pygments/lexers/__pycache__/j.cpython-38.pyc,,
+pygments/lexers/__pycache__/javascript.cpython-38.pyc,,
+pygments/lexers/__pycache__/julia.cpython-38.pyc,,
+pygments/lexers/__pycache__/jvm.cpython-38.pyc,,
+pygments/lexers/__pycache__/lisp.cpython-38.pyc,,
+pygments/lexers/__pycache__/make.cpython-38.pyc,,
+pygments/lexers/__pycache__/markup.cpython-38.pyc,,
+pygments/lexers/__pycache__/math.cpython-38.pyc,,
+pygments/lexers/__pycache__/matlab.cpython-38.pyc,,
+pygments/lexers/__pycache__/mime.cpython-38.pyc,,
+pygments/lexers/__pycache__/ml.cpython-38.pyc,,
+pygments/lexers/__pycache__/modeling.cpython-38.pyc,,
+pygments/lexers/__pycache__/modula2.cpython-38.pyc,,
+pygments/lexers/__pycache__/monte.cpython-38.pyc,,
+pygments/lexers/__pycache__/mosel.cpython-38.pyc,,
+pygments/lexers/__pycache__/ncl.cpython-38.pyc,,
+pygments/lexers/__pycache__/nimrod.cpython-38.pyc,,
+pygments/lexers/__pycache__/nit.cpython-38.pyc,,
+pygments/lexers/__pycache__/nix.cpython-38.pyc,,
+pygments/lexers/__pycache__/oberon.cpython-38.pyc,,
+pygments/lexers/__pycache__/objective.cpython-38.pyc,,
+pygments/lexers/__pycache__/ooc.cpython-38.pyc,,
+pygments/lexers/__pycache__/other.cpython-38.pyc,,
+pygments/lexers/__pycache__/parasail.cpython-38.pyc,,
+pygments/lexers/__pycache__/parsers.cpython-38.pyc,,
+pygments/lexers/__pycache__/pascal.cpython-38.pyc,,
+pygments/lexers/__pycache__/pawn.cpython-38.pyc,,
+pygments/lexers/__pycache__/perl.cpython-38.pyc,,
+pygments/lexers/__pycache__/php.cpython-38.pyc,,
+pygments/lexers/__pycache__/pony.cpython-38.pyc,,
+pygments/lexers/__pycache__/praat.cpython-38.pyc,,
+pygments/lexers/__pycache__/prolog.cpython-38.pyc,,
+pygments/lexers/__pycache__/python.cpython-38.pyc,,
+pygments/lexers/__pycache__/qvt.cpython-38.pyc,,
+pygments/lexers/__pycache__/r.cpython-38.pyc,,
+pygments/lexers/__pycache__/rdf.cpython-38.pyc,,
+pygments/lexers/__pycache__/rebol.cpython-38.pyc,,
+pygments/lexers/__pycache__/resource.cpython-38.pyc,,
+pygments/lexers/__pycache__/ride.cpython-38.pyc,,
+pygments/lexers/__pycache__/rnc.cpython-38.pyc,,
+pygments/lexers/__pycache__/roboconf.cpython-38.pyc,,
+pygments/lexers/__pycache__/robotframework.cpython-38.pyc,,
+pygments/lexers/__pycache__/ruby.cpython-38.pyc,,
+pygments/lexers/__pycache__/rust.cpython-38.pyc,,
+pygments/lexers/__pycache__/sas.cpython-38.pyc,,
+pygments/lexers/__pycache__/scdoc.cpython-38.pyc,,
+pygments/lexers/__pycache__/scripting.cpython-38.pyc,,
+pygments/lexers/__pycache__/sgf.cpython-38.pyc,,
+pygments/lexers/__pycache__/shell.cpython-38.pyc,,
+pygments/lexers/__pycache__/sieve.cpython-38.pyc,,
+pygments/lexers/__pycache__/slash.cpython-38.pyc,,
+pygments/lexers/__pycache__/smalltalk.cpython-38.pyc,,
+pygments/lexers/__pycache__/smv.cpython-38.pyc,,
+pygments/lexers/__pycache__/snobol.cpython-38.pyc,,
+pygments/lexers/__pycache__/solidity.cpython-38.pyc,,
+pygments/lexers/__pycache__/special.cpython-38.pyc,,
+pygments/lexers/__pycache__/sql.cpython-38.pyc,,
+pygments/lexers/__pycache__/stata.cpython-38.pyc,,
+pygments/lexers/__pycache__/supercollider.cpython-38.pyc,,
+pygments/lexers/__pycache__/tcl.cpython-38.pyc,,
+pygments/lexers/__pycache__/templates.cpython-38.pyc,,
+pygments/lexers/__pycache__/teraterm.cpython-38.pyc,,
+pygments/lexers/__pycache__/testing.cpython-38.pyc,,
+pygments/lexers/__pycache__/text.cpython-38.pyc,,
+pygments/lexers/__pycache__/textedit.cpython-38.pyc,,
+pygments/lexers/__pycache__/textfmts.cpython-38.pyc,,
+pygments/lexers/__pycache__/theorem.cpython-38.pyc,,
+pygments/lexers/__pycache__/trafficscript.cpython-38.pyc,,
+pygments/lexers/__pycache__/typoscript.cpython-38.pyc,,
+pygments/lexers/__pycache__/unicon.cpython-38.pyc,,
+pygments/lexers/__pycache__/urbi.cpython-38.pyc,,
+pygments/lexers/__pycache__/usd.cpython-38.pyc,,
+pygments/lexers/__pycache__/varnish.cpython-38.pyc,,
+pygments/lexers/__pycache__/verification.cpython-38.pyc,,
+pygments/lexers/__pycache__/web.cpython-38.pyc,,
+pygments/lexers/__pycache__/webidl.cpython-38.pyc,,
+pygments/lexers/__pycache__/webmisc.cpython-38.pyc,,
+pygments/lexers/__pycache__/whiley.cpython-38.pyc,,
+pygments/lexers/__pycache__/x10.cpython-38.pyc,,
+pygments/lexers/__pycache__/xorg.cpython-38.pyc,,
+pygments/lexers/__pycache__/zig.cpython-38.pyc,,
+pygments/lexers/_asy_builtins.py,sha256=zO_y8v-bp6kjlIwvbmse79qY8P7qAUhoVObaX9Qy3S8,27311
+pygments/lexers/_cl_builtins.py,sha256=x-mRhM6ukZv0pxYtqCq7SlsezhL8L9fpcCQ-gou0Z9w,14018
+pygments/lexers/_cocoa_builtins.py,sha256=h8CT9tpHtyg7PF1tQsLj0NCoN6o9TzzIOBA5UTadEQs,39962
+pygments/lexers/_csound_builtins.py,sha256=6OzL4rwy_qgAenQ8uN0n2u39NlfoVMSZEOy4tdLNHOE,17619
+pygments/lexers/_lasso_builtins.py,sha256=1jR-3eDhf1CUcPSSEXgbJMymAkQaJqpWIPjYM4rL6Sk,134534
+pygments/lexers/_lua_builtins.py,sha256=VkZNUZW9_lTBBcTFOl8NDepWTBfwha6tkV8BXR1EzaM,8297
+pygments/lexers/_mapping.py,sha256=KQJCLXu0jKEF8g9N8Ysg3giPGOFByjdgiOIEMA1yoqw,58920
+pygments/lexers/_mql_builtins.py,sha256=MS7566jpdiud7gEa_y4iJpHLkqjpo-7Y8WwB9MyMUhY,24737
+pygments/lexers/_openedge_builtins.py,sha256=hCqbIZd_qcBTlLyQGME8mqijUDCIm5P9HtIsv8JCEG8,48362
+pygments/lexers/_php_builtins.py,sha256=6-lirHeDcuo-5U5Yh9D4Fgx0sGmN2gq_dmCzjY3wEQg,154390
+pygments/lexers/_postgres_builtins.py,sha256=OI0j7i72gKoNGJomATjK_P00D7cVT6bpPqeeSB4k0aM,11210
+pygments/lexers/_scilab_builtins.py,sha256=ffgN-8Jj5KLRIwdD9j51zvScUbim2DaYv9WmF8_JCNA,52401
+pygments/lexers/_sourcemod_builtins.py,sha256=BSnleYNoGBZ0IrTJz17r-Z9yz7OHwgJVy1oRvvqXg38,27074
+pygments/lexers/_stan_builtins.py,sha256=BfSr_PiG5QE0-7hUfX4g_jdwugKf1zWtGE2w33FotvA,10481
+pygments/lexers/_stata_builtins.py,sha256=rZ8lopR_vKuDCBeCF9oPf71sHkD6n-tN6T5QpyOVEg4,25228
+pygments/lexers/_tsql_builtins.py,sha256=5qrkZJHk_m1SgTnhCrKp5jXJxexjCaf4GtRkY5_PTEA,15484
+pygments/lexers/_usd_builtins.py,sha256=eiB5M8wMXpllmheEzX3BuId6zXGQWaMcJs92E114P7U,1514
+pygments/lexers/_vbscript_builtins.py,sha256=chotaveFeFC-A6qcRAghQC7fAwrDmV-BKE_TW-hrZwk,4249
+pygments/lexers/_vim_builtins.py,sha256=Il_pjrP0PWUjMLCRPOZPoLgd_3jauvv9SGtBOkzmU2A,57090
+pygments/lexers/actionscript.py,sha256=jQTpfKe0OwRQTknMs132_WhqEDIW7lQbLW0HU5D0cOs,11181
+pygments/lexers/agile.py,sha256=0yI_Bq_-ekqFCiMzkcnJfNQ12iyA4QmPk70RCfl1Xa0,900
+pygments/lexers/algebra.py,sha256=vMjSoC9CgSWUMoaNu7gysQDdAc46t_Y6U4dX2mEzNCc,7201
+pygments/lexers/ambient.py,sha256=1_B2JkmFVgGq-JuEhmrXIu-q5WP2e7Ir5DSpO7qXN9E,2557
+pygments/lexers/ampl.py,sha256=HWeNZxYsNhPuGmW1lgNUxMe5zMtbMQ-xNFoj9oVOvq8,4123
+pygments/lexers/apl.py,sha256=gzIuS7p2Qz-pN5M0i45uvDow_gsNNus5k6zrwe19M9c,3174
+pygments/lexers/archetype.py,sha256=luJBCChBsH6fdJOboz5pVTSNCHh7miLd1xtLnI7TH88,11136
+pygments/lexers/asm.py,sha256=0gX4w_cbEbWWUp0kBla41r0ZMAY8NUxaaeW-faRRiqE,39356
+pygments/lexers/automation.py,sha256=9oR495kiyEbl-ev7PWF4Mw-jvtuSbOkmKJRmOvUzQb8,19640
+pygments/lexers/basic.py,sha256=siXk3fQfTEfJNeSW2sI-rfssoUpyj7drMdMrs5csYrs,27576
+pygments/lexers/bibtex.py,sha256=fxbIyhfV1yrFfd7oyAp-fyss27T0Bfv8VqRdVnLg63Y,4725
+pygments/lexers/boa.py,sha256=OB_W242mpr2vwbhg0MO4BpZcdhjaXuM6ffQ54zn3-ZI,3942
+pygments/lexers/business.py,sha256=onAZDADHM7atSEFKsmODBvPc8GlOTDwPgfsh_hVGXNI,27666
+pygments/lexers/c_cpp.py,sha256=dfPzKNoZeqoprqM4a7pTqWin7scz6VYg5_q9MzbnjH0,10638
+pygments/lexers/c_like.py,sha256=UusXq2S5d0v0CpGsxkmVludmu58WsLZQHWnsb0YwhK4,25080
+pygments/lexers/capnproto.py,sha256=pC3zXFSfYFHEIBq3OqLPGKl71K5HtdWnAEqMz6n8KFY,2194
+pygments/lexers/chapel.py,sha256=qzglX8OKh7aaUgXqAVXFkerNjTMIJKdMbihP_o2VFWk,3908
+pygments/lexers/clean.py,sha256=XG0_2KVyxbRFp-_U5HgT1wN9srL522kOe_9T51HeQmA,6362
+pygments/lexers/compiled.py,sha256=iGwVkCJ-SXoovHegOBSnOG518hHkDudegb9_qS-8vW0,1385
+pygments/lexers/configs.py,sha256=YO1NWPpNENDBN4fr-yBYdBWwIqotSquTeaKfY24a7mk,32127
+pygments/lexers/console.py,sha256=tj_ARAplXlqt2sGb2ycxsOy8xIL4NCAMOd3bZ0Zjojg,4120
+pygments/lexers/crystal.py,sha256=hTz20yWrjuam9JVG9Xxr6I7x50M_sIlfdBs0_gg5hKQ,16845
+pygments/lexers/csound.py,sha256=_OqMoEegHcp0NV7zuiLt6h_aY17adQRlJe1DG-pQP4M,16739
+pygments/lexers/css.py,sha256=EvJYfYeaT-TkDHpqXc4ssz0BoXSnBfWT3Vb9MKCeNQg,31467
+pygments/lexers/d.py,sha256=ZWc365C_vRjee-ATfen2OmHCFW3QMoMNE9JEeTplk94,9686
+pygments/lexers/dalvik.py,sha256=tAoPPa_iRXhWG_MzslSvBE99NlGnkx0WKnwdDQ3XU9o,4420
+pygments/lexers/data.py,sha256=_-dqdUSm7Edmh-zaljABFodHjwMCECW1Y-IlyjnWibM,19072
+pygments/lexers/diff.py,sha256=8jKEVtSA2YKprutpONqFvMKBhK1U_IFdxaScTuRNeU4,4873
+pygments/lexers/dotnet.py,sha256=oGL8kWok74zOLn92iwjOYX6Do9tnk96-YTFlJSdkBaQ,27582
+pygments/lexers/dsls.py,sha256=qv0kHmfQOHkHMnXyFbgYrDjUpsCkuXPLig1hma2zcJ0,35837
+pygments/lexers/dylan.py,sha256=LkWTiLsU561_VQL-PUirryEt7ewbseLRJfN-H1twmiA,10402
+pygments/lexers/ecl.py,sha256=5ivxyk5lzMottCuIxyE7DBvWYJV5KTuaHNRkvOtgM7c,5875
+pygments/lexers/eiffel.py,sha256=He2DwoUqWqMt8_PDzoP3NuBl9AZ9K3_SmpGkIgSzWuI,2482
+pygments/lexers/elm.py,sha256=91CM_h3PPoBLLm2stJqNZi3lgjhZH7NvzNKWdXAe8CA,2997
+pygments/lexers/email.py,sha256=ap9imSi6jbbP7gPBAyc3rcNurVDSNmRKIWv0ByR6VOQ,5207
+pygments/lexers/erlang.py,sha256=bZBqAKFa-EsRpvai0EpwZkKMPCd2q6pDfZapq9gh9Qg,18985
+pygments/lexers/esoteric.py,sha256=I7YEPnQdftxEOasCec8_dxVr7zgypMtoYtds0v2srNQ,9489
+pygments/lexers/ezhil.py,sha256=R26b4iXSpdMkgXewJN2INhJXL0ICXhW2o9fu3bn078U,3020
+pygments/lexers/factor.py,sha256=nBYhJoNLkSxtshGrF08tSQKUq_TtgVp1ukKX4Zromm8,17864
+pygments/lexers/fantom.py,sha256=3OTJDka8qeNRykM1Ki1Lyek6gd-jqOa-l5IgRbX8kSg,9982
+pygments/lexers/felix.py,sha256=DoSGdEntZgG3JUbeBA9fqUtg3lODbqwY3_XS6EIfXt4,9408
+pygments/lexers/floscript.py,sha256=eza4Rw3RI3mFjIIAA2czmi2SlgbcSI1T8pNr7vUd0eY,2667
+pygments/lexers/forth.py,sha256=Yqm9z-PjymjQjaleCW-SNJdCCc_NWeFXMz_XvjtAStI,7179
+pygments/lexers/fortran.py,sha256=XqwbZg25atjNDN8yUnqkxm1nfqbzSgZDqmKUIFNQSHk,9841
+pygments/lexers/foxpro.py,sha256=i1B6wX4U5oY8FJO5BGtTR0RaVWbO6P45PXxndi5HcpE,26236
+pygments/lexers/freefem.py,sha256=bYEPIZ1mysE2Ub9WO8NPHefz-CaGqPiE0WbHZeMHPsQ,27086
+pygments/lexers/functional.py,sha256=gJqzgp1ujTa8Zk5hjzXdutz8vvSJpRxhqTVCkK03Ij0,698
+pygments/lexers/go.py,sha256=aRdc0lsKbF7xxTcUnu35m-_e3SD7s2eBAllq1y7_qY8,3701
+pygments/lexers/grammar_notation.py,sha256=0INMOPjLnMohU0QCUIvBaJdty7A7i1hS4ZItB4ehPnA,7941
+pygments/lexers/graph.py,sha256=v013Gzn_RIuLrEz_DJuUah_vCpv6aVSMZpHGov19BMY,2756
+pygments/lexers/graphics.py,sha256=xfr7jZ_JF81kh-RFxIFSKOa06W4z0YxWzOxXAmrLwMA,38259
+pygments/lexers/haskell.py,sha256=VXzPclm0SiawKT2E4L4ZO8uPKcNYMVDWKY4NPcLKFsg,32245
+pygments/lexers/haxe.py,sha256=uWeORmR1BBCtA_HKRJIhzl26GfkzxzVd7c8or-REw7s,30959
+pygments/lexers/hdl.py,sha256=Xpf_1SJ-Uwf94J6MK_C5wR7JyXQkDKtlNdJ7MLL6uzs,18179
+pygments/lexers/hexdump.py,sha256=7y6XhpOGaVfbtWPSzFxgen8u4sr9sWCbnRUTmvnW1KI,3507
+pygments/lexers/html.py,sha256=B-XSH62dR1GZSJ6E3rDOoF6WO-FcKAnrCqTYvvm8jow,19280
+pygments/lexers/idl.py,sha256=hg7CnizaVt7br6ydWkt4VU9UMNax7gg4ToA3_rnqM1M,14986
+pygments/lexers/igor.py,sha256=FP_3Uz06p1emRB1BqpJ_11KY5k38D5nBLP9nFLnXsHA,30917
+pygments/lexers/inferno.py,sha256=iB07whrTd_qnsABOUalv999QhFYB2nhIHfTp_ECsTxM,3117
+pygments/lexers/installers.py,sha256=QVPOqFLmDydPhBJYmQcyjq6XQvcPb1Hxhpbv5JvgL-M,12866
+pygments/lexers/int_fiction.py,sha256=-jBktm0onIUz_hzsP0lUd3g9aLXJ4KLls0gjIwSB46o,55779
+pygments/lexers/iolang.py,sha256=Sv9qzhNgvVz1xmStZOLm3KTvlcI2A1zywAWQTo6ahs0,1905
+pygments/lexers/j.py,sha256=2wqBgvkxF99yBTdyslEsaeweZuqNO_yNZPjTKRwNTdo,4527
+pygments/lexers/javascript.py,sha256=cOcijZB6rFr1aclYt94LHInEKs1KgZZ4Xg4i2zDvW28,60194
+pygments/lexers/julia.py,sha256=ObRU-RjNe_N6zcQZgq5nws526X_j_4c4KPUFwwROFns,14179
+pygments/lexers/jvm.py,sha256=Qsg2PugXHCD55g_w4GVI4FDFCfOBICYW70xKhWMfNiQ,70347
+pygments/lexers/lisp.py,sha256=oUWEXl8czd_ovmKgkROzATeDjy01jPXAne18zXtEYRY,143609
+pygments/lexers/make.py,sha256=dbnhkZWxESvkvV69TrQEZYdo4yiUGoBBIE-VpXX1uBM,7326
+pygments/lexers/markup.py,sha256=6ACdRUnjI6CGRwes8szHfUjZU-nR7C42y2dbP5EdJeI,20704
+pygments/lexers/math.py,sha256=74YS-Z0zpBP6JYk1fsauYbW7XeZ-XPDTqKakbkX0v1Y,700
+pygments/lexers/matlab.py,sha256=23FUS7UgeE9c0gPr9xnyIBz_0Qr7f8ks8DCumF0fGdU,30403
+pygments/lexers/mime.py,sha256=hf-dShZ8AUSIzTELUEnlel7gnZLZpiOd-OFehEDSba0,7975
+pygments/lexers/ml.py,sha256=SV44RnHSqsCQX7wZHZe_bJtzl0hTFrlY5UF8nhO9ozU,31376
+pygments/lexers/modeling.py,sha256=n4gorBPf3gttlsITHGYeOnrUjUWz3nCh5oLYkDMOnrM,13409
+pygments/lexers/modula2.py,sha256=zenAwJk17hVa1FnOTZHJAwLrDrmcurxu4yw7pUoa_Qk,52561
+pygments/lexers/monte.py,sha256=tIn0lsLdG0iHRX_01KI9OkR4iazyiV5F8H3OlkKdFZQ,6307
+pygments/lexers/mosel.py,sha256=N8J6mCnzTUd4KADnhMAAQ2X5OZGxXI-i2Xvq8HfzjNA,9211
+pygments/lexers/ncl.py,sha256=0U8xDdO0guIlnQKCHKmKQPXv91Jqy1YvrkNoMonaYp4,63986
+pygments/lexers/nimrod.py,sha256=ERUF4NVMUlbirF_FvN8EIXXFRv6RJqchq4rr9vugHPI,5174
+pygments/lexers/nit.py,sha256=FSQCdLNjKUrw_pisiCH-m15EQcz30lv6wvvbTgkrB-Y,2743
+pygments/lexers/nix.py,sha256=RTgXFxL2niA9iG1zLHRWdNZy70he_vE1D0-FcoU1cfw,4031
+pygments/lexers/oberon.py,sha256=HMOnehgSbLaTV6l1e5b44aZttyE2YIfA2hzyj6MW5xU,3733
+pygments/lexers/objective.py,sha256=FA7gniip1eEDC9x1UIvdI8flRtFxehTHId0MlqB0llo,22789
+pygments/lexers/ooc.py,sha256=lP6KSoWFrq9Q7w5F_aRSaLYUryh4nuBcPfnUkwyBQsU,2999
+pygments/lexers/other.py,sha256=0xuOYQ0uI9eLONFTNBv2e-hltZhQcN531NVi7e2AcQQ,1768
+pygments/lexers/parasail.py,sha256=YEgpP3B62qHYOBFcoChOfgzATczrSPj1WyovIgqW3gg,2737
+pygments/lexers/parsers.py,sha256=fhTyqwzifEpFFfW8emQ9WYYBwlUs48Sv_qykCUQoWHE,27590
+pygments/lexers/pascal.py,sha256=YpIQHj54lSJrBFdWSo_nkV8M_dYHfJyJMjLk6W6UNZY,32624
+pygments/lexers/pawn.py,sha256=LN0m73AC00wHyvBlbTPU1k2ihBdmDkfIFq24uAWvsF0,8021
+pygments/lexers/perl.py,sha256=Plh4ovtDulyq5oxJTIijQlJ8Td5ga7s3uQ0sbV7uES8,39155
+pygments/lexers/php.py,sha256=yU7DdvXBQlnEvX6WBb7c9kgSw9COwYp6psvzGmCebs4,10834
+pygments/lexers/pony.py,sha256=h6S-MGKN7q7sk869oWjC1OcgV7zwXloYnGFshhTFxHk,3269
+pygments/lexers/praat.py,sha256=aFOD7K8wEVjcr4Jb3DAGn5AmjhMDSHY8pVC4WQfjGlc,12292
+pygments/lexers/prolog.py,sha256=TNj3F1ossufZ_XKVVrWJlRtPDRU1ExGO6NS0-TBq7gw,12405
+pygments/lexers/python.py,sha256=7VkiN5v5IAIL9bDQGdwtmt2__plhedbEi3rzh397Nec,51187
+pygments/lexers/qvt.py,sha256=_lXPT5SdDEqhCmuq4TcO9JRrP703kIT3a1Y_ZW9NTCY,6097
+pygments/lexers/r.py,sha256=VGb5x89r844B-a_V49wAwu8i0245tbdyLKZWq_wRG74,6276
+pygments/lexers/rdf.py,sha256=RAerwJHNjrtXXtua4UXRfUQkMQ36uqfQZlSj63yoQA8,14608
+pygments/lexers/rebol.py,sha256=3bhOFMMneP38O9aJFjPZlNTS6cwbcnDlJaDbfvF4x1g,18624
+pygments/lexers/resource.py,sha256=xbAErtO3-d4LQJJPnLfhD7Kxz_NVQp4WiYrFu52UX-o,2926
+pygments/lexers/ride.py,sha256=lMlEAtdFILb1vd2WC17UaNwFJqOKb1du7EPG5jwX3Xk,5074
+pygments/lexers/rnc.py,sha256=OxpGllFDAM6Vn_alGiaEKMzQDoqRCrl82ocOO4s6L_k,1990
+pygments/lexers/roboconf.py,sha256=9eZkX5xkajimTV1F5wr0Y8QHPfuEB659Lde8H5AzFfM,2070
+pygments/lexers/robotframework.py,sha256=R0x05_jTPu9bErGS4v_mh-9kyCOG4g4GC-KUvxYkSKo,18646
+pygments/lexers/ruby.py,sha256=rqBelW7OJZIP-J3MVPgQzhXTh3Ey41MjMmpbGQDv390,22168
+pygments/lexers/rust.py,sha256=auhHzIX7VaYzgkj26USy9ZH5DZbPQ1LJYW7YDQB8Wgs,7844
+pygments/lexers/sas.py,sha256=guELd_4GLI1fhZr3Sxtn80Gt6s6ViYFf4jWnK23zzDc,9449
+pygments/lexers/scdoc.py,sha256=raoQeCR0E6sjvT56Lar0Wxc_1u6fB-gFjptjT0jE56g,1983
+pygments/lexers/scripting.py,sha256=kH-Kezddx8HzQMgA2z1ZRB-lcvc9qVyEvZnVjJ_YUBU,69759
+pygments/lexers/sgf.py,sha256=R5Zqd5oVOyUd-NewEXMmACaEO5RX_F7eYUZaJXGTY4g,2024
+pygments/lexers/shell.py,sha256=00dGjndFJ6ZWZzsfKW3nKjIKG-CBwTHH-VYQQs57700,33870
+pygments/lexers/sieve.py,sha256=79MOyJl8iAuvzhphpK-Qu_oybyFTdgzrP6d1Hj9-Lqc,2313
+pygments/lexers/slash.py,sha256=WN2f0VirklPe6djATJtbNMkFGRiuIykKZjqG19Rlgk8,8522
+pygments/lexers/smalltalk.py,sha256=xwRETRB2O_cKHZU9w18QXZpiz87WOw0lULDhMxc9xnA,7215
+pygments/lexers/smv.py,sha256=rbXxBG2rGtm7oPP_Il6v3BbUH3i5q4RtiDaweeN7fLA,2793
+pygments/lexers/snobol.py,sha256=YFOOuPk4yBxg6stlIm6R3UiUgzkMjz06ac7dW3LRxNk,2756
+pygments/lexers/solidity.py,sha256=fW_aQc_HyRawyStUxllYhUn-NYJPCqzDH-ABWTeKcOI,3255
+pygments/lexers/special.py,sha256=N0msqSMphQf0_7Vx9T7kABoHx_KkYLHUxP2FcyYmshg,3149
+pygments/lexers/sql.py,sha256=7evoMDWdBz0kXPIt1jy0YXrQ9KJFYnjN2cslkDrfB88,31823
+pygments/lexers/stata.py,sha256=E46GbEy8ET3yBw1l3KQLSknKW3_qS6Sq3V_hkpVotn0,6459
+pygments/lexers/supercollider.py,sha256=llVW-HUi7m4MNGy4wEp8bF2BJGTXdwF0oNfJfJ_sI8M,3516
+pygments/lexers/tcl.py,sha256=ORf0CBXHwC2MFBpZpcK2sPBCCTyJ3rcwcYOIhN9s0AI,5398
+pygments/lexers/templates.py,sha256=AE6yF5ohbqy52-rn8xUJ5A6OZCkoIs72j7wUnwp25vE,73612
+pygments/lexers/teraterm.py,sha256=2DdFVGyKIF85efcB5QdqqQQNGjqRHoWzVc5psdhSD7c,6310
+pygments/lexers/testing.py,sha256=JfFVWAh_8zaqaiPrewb3CGTmGNuHu6hFR4dtvcFCYRE,10753
+pygments/lexers/text.py,sha256=7cwhjV2GwLRH0CPjlOb7PLVa6XEiRQhDNFU1VO3KNjE,1030
+pygments/lexers/textedit.py,sha256=7F9f0-pAsorZpaFalHOZz5124fsdHCLTAWX_YuwA9XE,6092
+pygments/lexers/textfmts.py,sha256=Ctq-u_o2HVb-xvvsKfpuwkgNzVCNxXJHkirqhpsC8lE,15184
+pygments/lexers/theorem.py,sha256=c51ir2FdsyczFRu059z9wKFZplBETdhwWuWX0Y9wMtM,18908
+pygments/lexers/trafficscript.py,sha256=BYTyTAlD4oDVZ9D1aRrmy4zIC4VJ_n2Lgkgq92DxeJM,1546
+pygments/lexers/typoscript.py,sha256=Leb81-51KKuK9FHoo1xKWJGPqTIsyVoeZkGcsK5tQzU,8224
+pygments/lexers/unicon.py,sha256=xo0E3hnBW0gbdszL6n96Cdzume3l1DI7scgkIQ8koaw,18001
+pygments/lexers/urbi.py,sha256=Zq3PCTC-KI7QYuLZ7NSdikm9-MrAhrYH9DGXVSTT89I,5750
+pygments/lexers/usd.py,sha256=uEPjTqbXu0Ag_qJOB9IEwAGj4-R8_5yBbNRlUPtSlbY,3487
+pygments/lexers/varnish.py,sha256=Y2t_JY7uVz6pH3UvlpIvuaxurH4gRiQrP4Esqw5jPnk,7265
+pygments/lexers/verification.py,sha256=rN6OD2ary21XXvnzUTjknibiM9oF9YjxmLyC7iG6kuo,3932
+pygments/lexers/web.py,sha256=4thoq-m_kGixnDR2baWwN5eEqpFAeH3aRaOMK4J_GOE,918
+pygments/lexers/webidl.py,sha256=TTHSlvRlmdpMPNCMvrrUULY6Y6Q7l53HMR9CGyisq9I,10473
+pygments/lexers/webmisc.py,sha256=pJUyS7bcr77iHQshVzllZmIaZQoVkdGZi1D3FqlJEg0,40054
+pygments/lexers/whiley.py,sha256=J9ZuO8Yv9DYl9Mb6IHyZz2zguGxZXBKxTSwDcxaii8o,4012
+pygments/lexers/x10.py,sha256=Lu35QT0l-objbi6mCm-rxZU_7gO1rZQhjA6JnZ-EBRI,1965
+pygments/lexers/xorg.py,sha256=FDN0czbxMD6YDOqwL6ltspElwMoxxNVKW11OL--keQY,887
+pygments/lexers/zig.py,sha256=C3kbdZ_rJUb0hMK61UiFsjzJVvC_QIPJZ6glZDNPi78,4147
+pygments/modeline.py,sha256=ctgJHLjLF23gklYyo7Nz6P3I3Z8ArewlT5R2n2KNatQ,1010
+pygments/plugin.py,sha256=QFSBZcOqSJqAVQnydwDg8_LG7GzkxUgWjb0FzqoQHEM,1734
+pygments/regexopt.py,sha256=yMZBB3DRudP4AjPGAUpIF__o_NWOK4HrNfFV6h04V1w,3094
+pygments/scanner.py,sha256=tATA_g4QYMfFS2Tb-WIJtr_abdUetPb4tC1k7b0e97w,3115
+pygments/sphinxext.py,sha256=OVWeIgj0mvRslxw5boeo0tBykJHuSi5jSjIWXgAmqgk,4618
+pygments/style.py,sha256=DhwzS-OOt088Zkk-SWY6lBVy-Eh_2AcP2R_FdTYO9oI,5705
+pygments/styles/__init__.py,sha256=TBRYkROPEACN-kE1nQ1ygrhU4efWVShENqI6aqjk5cE,2894
+pygments/styles/__pycache__/__init__.cpython-38.pyc,,
+pygments/styles/__pycache__/abap.cpython-38.pyc,,
+pygments/styles/__pycache__/algol.cpython-38.pyc,,
+pygments/styles/__pycache__/algol_nu.cpython-38.pyc,,
+pygments/styles/__pycache__/arduino.cpython-38.pyc,,
+pygments/styles/__pycache__/autumn.cpython-38.pyc,,
+pygments/styles/__pycache__/borland.cpython-38.pyc,,
+pygments/styles/__pycache__/bw.cpython-38.pyc,,
+pygments/styles/__pycache__/colorful.cpython-38.pyc,,
+pygments/styles/__pycache__/default.cpython-38.pyc,,
+pygments/styles/__pycache__/emacs.cpython-38.pyc,,
+pygments/styles/__pycache__/friendly.cpython-38.pyc,,
+pygments/styles/__pycache__/fruity.cpython-38.pyc,,
+pygments/styles/__pycache__/igor.cpython-38.pyc,,
+pygments/styles/__pycache__/inkpot.cpython-38.pyc,,
+pygments/styles/__pycache__/lovelace.cpython-38.pyc,,
+pygments/styles/__pycache__/manni.cpython-38.pyc,,
+pygments/styles/__pycache__/monokai.cpython-38.pyc,,
+pygments/styles/__pycache__/murphy.cpython-38.pyc,,
+pygments/styles/__pycache__/native.cpython-38.pyc,,
+pygments/styles/__pycache__/paraiso_dark.cpython-38.pyc,,
+pygments/styles/__pycache__/paraiso_light.cpython-38.pyc,,
+pygments/styles/__pycache__/pastie.cpython-38.pyc,,
+pygments/styles/__pycache__/perldoc.cpython-38.pyc,,
+pygments/styles/__pycache__/rainbow_dash.cpython-38.pyc,,
+pygments/styles/__pycache__/rrt.cpython-38.pyc,,
+pygments/styles/__pycache__/sas.cpython-38.pyc,,
+pygments/styles/__pycache__/solarized.cpython-38.pyc,,
+pygments/styles/__pycache__/stata_dark.cpython-38.pyc,,
+pygments/styles/__pycache__/stata_light.cpython-38.pyc,,
+pygments/styles/__pycache__/tango.cpython-38.pyc,,
+pygments/styles/__pycache__/trac.cpython-38.pyc,,
+pygments/styles/__pycache__/vim.cpython-38.pyc,,
+pygments/styles/__pycache__/vs.cpython-38.pyc,,
+pygments/styles/__pycache__/xcode.cpython-38.pyc,,
+pygments/styles/abap.py,sha256=weNa2ATjBDbWN-EJp36KuapOv_161OYudM6ilzp_5tU,751
+pygments/styles/algol.py,sha256=aVMDywxJ1VRTQ-eYd7CZVQ1BFIWehw2G9OcGg5KmfFI,2263
+pygments/styles/algol_nu.py,sha256=xgZhMlsdR8RppCyaGliUKBWVvianjxt5KrIcWCJDVMM,2278
+pygments/styles/arduino.py,sha256=MtP75GT5SqaAX2PfaC116iPETAPOaD6re6cZ1d9xehQ,4492
+pygments/styles/autumn.py,sha256=setTunOOFJAmdVHab3wmv5OkZmjP6-NVoZjMAyQ2rYY,2144
+pygments/styles/borland.py,sha256=UOFktPmmU_TK6prVMETvVm6FhT01oqsd9_HcG1NZq_Y,1562
+pygments/styles/bw.py,sha256=t0kQytwvh_0SMBcOcmM5foPcc3JWiSd8VWBIXkoP17s,1355
+pygments/styles/colorful.py,sha256=NV-MuEX61J0HH1M0dmurc0RNinp5eA9qIHTjhZ3M6ek,2778
+pygments/styles/default.py,sha256=j124bQ-0TFJaQ2U3ZICWq8_KUOQdjUSxFVknFcpSF40,2532
+pygments/styles/emacs.py,sha256=zNGOC_fHnCZxVphHkieHr7f-zxKkSg_PrFEwWGfQw2U,2486
+pygments/styles/friendly.py,sha256=55qszHEliWiT8h1dW5GjnEA47CpXpJ0BX0C-x6EmZsQ,2515
+pygments/styles/fruity.py,sha256=zkSwyKzmWDs9Jtzgq3rG4DathCH6Pq2JVLuUW8auKXI,1298
+pygments/styles/igor.py,sha256=6GFYt43btx70XZoVDSAqljc1G7UJb6_r9euz0b5nWpY,739
+pygments/styles/inkpot.py,sha256=ecGBxZQw0UhueDHZA06wvgWizu2JzXg9YkYCoLYJuh4,2347
+pygments/styles/lovelace.py,sha256=PBObIz9_gAjMJ8YgNrm-_z2P_wG7moQ1BosKLThJl20,3173
+pygments/styles/manni.py,sha256=EmN6YSp-U-ccxqLqjfnIPg-qkIhUAlSb78tIBvwFCsA,2374
+pygments/styles/monokai.py,sha256=hT5jhhqRQoOmjdK1lZ56hspKke4UDCCiUc3B8m5osLY,5086
+pygments/styles/murphy.py,sha256=ppT--IJLWtcbxKCNRBuusP4zdSmbR8YShosCdd3hpXs,2751
+pygments/styles/native.py,sha256=xkphXXv8PvfbgawNSTR28LcEe1TQxFtdrk_sQcGeo2E,1938
+pygments/styles/paraiso_dark.py,sha256=3a4BSgZQMfB8E2bUMi1WAWkDr98oFUfaPygcsl9B9ZM,5641
+pygments/styles/paraiso_light.py,sha256=QsZyh5oPQb6wYgnoQAkH2MRBkJjRPqAu5De77diOeN8,5645
+pygments/styles/pastie.py,sha256=duELGPs_LEzLbesA39vu0MzxtwkPJ2wnV2rS_clTu2E,2473
+pygments/styles/perldoc.py,sha256=Wf54Io76npBZEsVt8HuM-x7mpzJ7iwPgj5PP_hOf91w,2175
+pygments/styles/rainbow_dash.py,sha256=IlLrIcl76wy4aIiZIRWxMzUILOI9ms7YEX0o6UL9ROc,2480
+pygments/styles/rrt.py,sha256=xQp_B5sDo4BJ4Mzx4PWVK6AW_pZs_XmIoM8zLwpfVTs,852
+pygments/styles/sas.py,sha256=jC6iVFl7-xp0MKwFkPM9QbEInzxVlnhsluPR69iqMZE,1441
+pygments/styles/solarized.py,sha256=f_E9bd-THUcJUJR36hQgbu9BVIjLi6yiI_n07oRu2u4,3747
+pygments/styles/stata_dark.py,sha256=K1AKYh93Jd9E_eWXhDw7-tM6fJbIuFeJcAR5jVE1Nkc,1245
+pygments/styles/stata_light.py,sha256=cN0ulhqteDqKkGnOqAL1aNHy3AvYbmu-fS35XaMptKM,1274
+pygments/styles/tango.py,sha256=1VtAeshYeFh4jWITdb5_wf-7avl1DwtGWrQkvSKqJJo,7096
+pygments/styles/trac.py,sha256=wWJokrY8EWWxJTChPxxYsH_cB-CNN7coa1ZBihzbiG4,1933
+pygments/styles/vim.py,sha256=9PtHne1K4TmKIFcPoM4NY_HRV3naKXRIeEvMC437t7U,1976
+pygments/styles/vs.py,sha256=-mK8_RJJk12gbR-TXP1zedQpflKS2zc9xQQzHbZTB1E,1073
+pygments/styles/xcode.py,sha256=s3NuWSoZ8dRCuU0PU0-aDop4xqgAXP4rVefg5yFgQVg,1501
+pygments/token.py,sha256=J1LOX6vjhiN3pTShN9Mj0MfbWPzhypuPQYZuw29E8As,6167
+pygments/unistring.py,sha256=V4LPrb9dhNBGR-AdEnopDNmwpxFSodqPBuHOlqG9b0g,64569
+pygments/util.py,sha256=586xXHiJGGZxqk5PMBu3vBhE68DLuAe5MBARWrSPGxA,10778

+ 5 - 0
Lib/site-packages/Pygments-2.6.1.dist-info/WHEEL

@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.33.6)
+Root-Is-Purelib: true
+Tag: py3-none-any
+

+ 3 - 0
Lib/site-packages/Pygments-2.6.1.dist-info/entry_points.txt

@@ -0,0 +1,3 @@
+[console_scripts]
+pygmentize = pygments.cmdline:main
+

+ 1 - 0
Lib/site-packages/Pygments-2.6.1.dist-info/top_level.txt

@@ -0,0 +1 @@
+pygments

BIN
Lib/site-packages/_yaml.cp38-win_amd64.pyd


+ 1 - 0
Lib/site-packages/click-7.1.2.dist-info/INSTALLER

@@ -0,0 +1 @@
+pip

+ 28 - 0
Lib/site-packages/click-7.1.2.dist-info/LICENSE.rst

@@ -0,0 +1,28 @@
+Copyright 2014 Pallets
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+1.  Redistributions of source code must retain the above copyright
+    notice, this list of conditions and the following disclaimer.
+
+2.  Redistributions in binary form must reproduce the above copyright
+    notice, this list of conditions and the following disclaimer in the
+    documentation and/or other materials provided with the distribution.
+
+3.  Neither the name of the copyright holder nor the names of its
+    contributors may be used to endorse or promote products derived from
+    this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
+PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+ 102 - 0
Lib/site-packages/click-7.1.2.dist-info/METADATA

@@ -0,0 +1,102 @@
+Metadata-Version: 2.1
+Name: click
+Version: 7.1.2
+Summary: Composable command line interface toolkit
+Home-page: https://palletsprojects.com/p/click/
+Maintainer: Pallets
+Maintainer-email: contact@palletsprojects.com
+License: BSD-3-Clause
+Project-URL: Documentation, https://click.palletsprojects.com/
+Project-URL: Code, https://github.com/pallets/click
+Project-URL: Issue tracker, https://github.com/pallets/click/issues
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: BSD License
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 3
+Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
+
+\$ click\_
+==========
+
+Click is a Python package for creating beautiful command line interfaces
+in a composable way with as little code as necessary. It's the "Command
+Line Interface Creation Kit". It's highly configurable but comes with
+sensible defaults out of the box.
+
+It aims to make the process of writing command line tools quick and fun
+while also preventing any frustration caused by the inability to
+implement an intended CLI API.
+
+Click in three points:
+
+-   Arbitrary nesting of commands
+-   Automatic help page generation
+-   Supports lazy loading of subcommands at runtime
+
+
+Installing
+----------
+
+Install and update using `pip`_:
+
+.. code-block:: text
+
+    $ pip install -U click
+
+.. _pip: https://pip.pypa.io/en/stable/quickstart/
+
+
+A Simple Example
+----------------
+
+.. code-block:: python
+
+    import click
+
+    @click.command()
+    @click.option("--count", default=1, help="Number of greetings.")
+    @click.option("--name", prompt="Your name", help="The person to greet.")
+    def hello(count, name):
+        """Simple program that greets NAME for a total of COUNT times."""
+        for _ in range(count):
+            click.echo(f"Hello, {name}!")
+
+    if __name__ == '__main__':
+        hello()
+
+.. code-block:: text
+
+    $ python hello.py --count=3
+    Your name: Click
+    Hello, Click!
+    Hello, Click!
+    Hello, Click!
+
+
+Donate
+------
+
+The Pallets organization develops and supports Click and other popular
+packages. In order to grow the community of contributors and users, and
+allow the maintainers to devote more time to the projects, `please
+donate today`_.
+
+.. _please donate today: https://palletsprojects.com/donate
+
+
+Links
+-----
+
+-   Website: https://palletsprojects.com/p/click/
+-   Documentation: https://click.palletsprojects.com/
+-   Releases: https://pypi.org/project/click/
+-   Code: https://github.com/pallets/click
+-   Issue tracker: https://github.com/pallets/click/issues
+-   Test status: https://dev.azure.com/pallets/click/_build
+-   Official chat: https://discord.gg/t6rrQZH
+
+

+ 40 - 0
Lib/site-packages/click-7.1.2.dist-info/RECORD

@@ -0,0 +1,40 @@
+click-7.1.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+click-7.1.2.dist-info/LICENSE.rst,sha256=morRBqOU6FO_4h9C9OctWSgZoigF2ZG18ydQKSkrZY0,1475
+click-7.1.2.dist-info/METADATA,sha256=LrRgakZKV7Yg3qJqX_plu2WhFW81MzP3EqQmZhHIO8M,2868
+click-7.1.2.dist-info/RECORD,,
+click-7.1.2.dist-info/WHEEL,sha256=kGT74LWyRUZrL4VgLh6_g12IeVl_9u9ZVhadrgXZUEY,110
+click-7.1.2.dist-info/top_level.txt,sha256=J1ZQogalYS4pphY_lPECoNMfw0HzTSrZglC4Yfwo4xA,6
+click/__init__.py,sha256=FkyGDQ-cbiQxP_lxgUspyFYS48f2S_pTcfKPz-d_RMo,2463
+click/__pycache__/__init__.cpython-38.pyc,,
+click/__pycache__/_bashcomplete.cpython-38.pyc,,
+click/__pycache__/_compat.cpython-38.pyc,,
+click/__pycache__/_termui_impl.cpython-38.pyc,,
+click/__pycache__/_textwrap.cpython-38.pyc,,
+click/__pycache__/_unicodefun.cpython-38.pyc,,
+click/__pycache__/_winconsole.cpython-38.pyc,,
+click/__pycache__/core.cpython-38.pyc,,
+click/__pycache__/decorators.cpython-38.pyc,,
+click/__pycache__/exceptions.cpython-38.pyc,,
+click/__pycache__/formatting.cpython-38.pyc,,
+click/__pycache__/globals.cpython-38.pyc,,
+click/__pycache__/parser.cpython-38.pyc,,
+click/__pycache__/termui.cpython-38.pyc,,
+click/__pycache__/testing.cpython-38.pyc,,
+click/__pycache__/types.cpython-38.pyc,,
+click/__pycache__/utils.cpython-38.pyc,,
+click/_bashcomplete.py,sha256=9J98IHQYmCAr2Jup6TDshUr5FJEen-AoQCZR0K5nKxQ,12309
+click/_compat.py,sha256=AoMaYnZ-3pwtNXuHtlb6_UXsayoG0QZiHKIRy2VFezc,24169
+click/_termui_impl.py,sha256=yNktUMAdjYOU1HMkq915jR3zgAzUNtGSQqSTSSMn3eQ,20702
+click/_textwrap.py,sha256=ajCzkzFly5tjm9foQ5N9_MOeaYJMBjAltuFa69n4iXY,1197
+click/_unicodefun.py,sha256=apLSNEBZgUsQNPMUv072zJ1swqnm0dYVT5TqcIWTt6w,4201
+click/_winconsole.py,sha256=6YDu6Rq1Wxx4w9uinBMK2LHvP83aerZM9GQurlk3QDo,10010
+click/core.py,sha256=V6DJzastGhrC6WTDwV9MSLwcJUdX2Uf1ypmgkjBdn_Y,77650
+click/decorators.py,sha256=3TvEO_BkaHl7k6Eh1G5eC7JK4LKPdpFqH9JP0QDyTlM,11215
+click/exceptions.py,sha256=3pQAyyMFzx5A3eV0Y27WtDTyGogZRbrC6_o5DjjKBbw,8118
+click/formatting.py,sha256=Wb4gqFEpWaKPgAbOvnkCl8p-bEZx5KpM5ZSByhlnJNk,9281
+click/globals.py,sha256=ht7u2kUGI08pAarB4e4yC8Lkkxy6gJfRZyzxEj8EbWQ,1501
+click/parser.py,sha256=mFK-k58JtPpqO0AC36WAr0t5UfzEw1mvgVSyn7WCe9M,15691
+click/termui.py,sha256=G7QBEKIepRIGLvNdGwBTYiEtSImRxvTO_AglVpyHH2s,23998
+click/testing.py,sha256=EUEsDUqNXFgCLhZ0ZFOROpaVDA5I_rijwnNPE6qICgA,12854
+click/types.py,sha256=wuubik4VqgqAw5dvbYFkDt-zSAx97y9TQXuXcVaRyQA,25045
+click/utils.py,sha256=4VEcJ7iEHwjnFuzEuRtkT99o5VG3zqSD7Q2CVzv13WU,15940

+ 6 - 0
Lib/site-packages/click-7.1.2.dist-info/WHEEL

@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.34.2)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+

+ 1 - 0
Lib/site-packages/click-7.1.2.dist-info/top_level.txt

@@ -0,0 +1 @@
+click

+ 79 - 0
Lib/site-packages/click/__init__.py

@@ -0,0 +1,79 @@
+"""
+Click is a simple Python module inspired by the stdlib optparse to make
+writing command line scripts fun. Unlike other modules, it's based
+around a simple API that does not come with too much magic and is
+composable.
+"""
+from .core import Argument
+from .core import BaseCommand
+from .core import Command
+from .core import CommandCollection
+from .core import Context
+from .core import Group
+from .core import MultiCommand
+from .core import Option
+from .core import Parameter
+from .decorators import argument
+from .decorators import command
+from .decorators import confirmation_option
+from .decorators import group
+from .decorators import help_option
+from .decorators import make_pass_decorator
+from .decorators import option
+from .decorators import pass_context
+from .decorators import pass_obj
+from .decorators import password_option
+from .decorators import version_option
+from .exceptions import Abort
+from .exceptions import BadArgumentUsage
+from .exceptions import BadOptionUsage
+from .exceptions import BadParameter
+from .exceptions import ClickException
+from .exceptions import FileError
+from .exceptions import MissingParameter
+from .exceptions import NoSuchOption
+from .exceptions import UsageError
+from .formatting import HelpFormatter
+from .formatting import wrap_text
+from .globals import get_current_context
+from .parser import OptionParser
+from .termui import clear
+from .termui import confirm
+from .termui import echo_via_pager
+from .termui import edit
+from .termui import get_terminal_size
+from .termui import getchar
+from .termui import launch
+from .termui import pause
+from .termui import progressbar
+from .termui import prompt
+from .termui import secho
+from .termui import style
+from .termui import unstyle
+from .types import BOOL
+from .types import Choice
+from .types import DateTime
+from .types import File
+from .types import FLOAT
+from .types import FloatRange
+from .types import INT
+from .types import IntRange
+from .types import ParamType
+from .types import Path
+from .types import STRING
+from .types import Tuple
+from .types import UNPROCESSED
+from .types import UUID
+from .utils import echo
+from .utils import format_filename
+from .utils import get_app_dir
+from .utils import get_binary_stream
+from .utils import get_os_args
+from .utils import get_text_stream
+from .utils import open_file
+
+# Controls if click should emit the warning about the use of unicode
+# literals.
+disable_unicode_literals_warning = False
+
+__version__ = "7.1.2"

+ 375 - 0
Lib/site-packages/click/_bashcomplete.py

@@ -0,0 +1,375 @@
+import copy
+import os
+import re
+
+from .core import Argument
+from .core import MultiCommand
+from .core import Option
+from .parser import split_arg_string
+from .types import Choice
+from .utils import echo
+
+try:
+    from collections import abc
+except ImportError:
+    import collections as abc
+
+WORDBREAK = "="
+
+# Note, only BASH version 4.4 and later have the nosort option.
+COMPLETION_SCRIPT_BASH = """
+%(complete_func)s() {
+    local IFS=$'\n'
+    COMPREPLY=( $( env COMP_WORDS="${COMP_WORDS[*]}" \\
+                   COMP_CWORD=$COMP_CWORD \\
+                   %(autocomplete_var)s=complete $1 ) )
+    return 0
+}
+
+%(complete_func)setup() {
+    local COMPLETION_OPTIONS=""
+    local BASH_VERSION_ARR=(${BASH_VERSION//./ })
+    # Only BASH version 4.4 and later have the nosort option.
+    if [ ${BASH_VERSION_ARR[0]} -gt 4 ] || ([ ${BASH_VERSION_ARR[0]} -eq 4 ] \
+&& [ ${BASH_VERSION_ARR[1]} -ge 4 ]); then
+        COMPLETION_OPTIONS="-o nosort"
+    fi
+
+    complete $COMPLETION_OPTIONS -F %(complete_func)s %(script_names)s
+}
+
+%(complete_func)setup
+"""
+
+COMPLETION_SCRIPT_ZSH = """
+#compdef %(script_names)s
+
+%(complete_func)s() {
+    local -a completions
+    local -a completions_with_descriptions
+    local -a response
+    (( ! $+commands[%(script_names)s] )) && return 1
+
+    response=("${(@f)$( env COMP_WORDS=\"${words[*]}\" \\
+                        COMP_CWORD=$((CURRENT-1)) \\
+                        %(autocomplete_var)s=\"complete_zsh\" \\
+                        %(script_names)s )}")
+
+    for key descr in ${(kv)response}; do
+      if [[ "$descr" == "_" ]]; then
+          completions+=("$key")
+      else
+          completions_with_descriptions+=("$key":"$descr")
+      fi
+    done
+
+    if [ -n "$completions_with_descriptions" ]; then
+        _describe -V unsorted completions_with_descriptions -U
+    fi
+
+    if [ -n "$completions" ]; then
+        compadd -U -V unsorted -a completions
+    fi
+    compstate[insert]="automenu"
+}
+
+compdef %(complete_func)s %(script_names)s
+"""
+
+COMPLETION_SCRIPT_FISH = (
+    "complete --no-files --command %(script_names)s --arguments"
+    ' "(env %(autocomplete_var)s=complete_fish'
+    " COMP_WORDS=(commandline -cp) COMP_CWORD=(commandline -t)"
+    ' %(script_names)s)"'
+)
+
+_completion_scripts = {
+    "bash": COMPLETION_SCRIPT_BASH,
+    "zsh": COMPLETION_SCRIPT_ZSH,
+    "fish": COMPLETION_SCRIPT_FISH,
+}
+
+_invalid_ident_char_re = re.compile(r"[^a-zA-Z0-9_]")
+
+
+def get_completion_script(prog_name, complete_var, shell):
+    cf_name = _invalid_ident_char_re.sub("", prog_name.replace("-", "_"))
+    script = _completion_scripts.get(shell, COMPLETION_SCRIPT_BASH)
+    return (
+        script
+        % {
+            "complete_func": "_{}_completion".format(cf_name),
+            "script_names": prog_name,
+            "autocomplete_var": complete_var,
+        }
+    ).strip() + ";"
+
+
+def resolve_ctx(cli, prog_name, args):
+    """Parse into a hierarchy of contexts. Contexts are connected
+    through the parent variable.
+
+    :param cli: command definition
+    :param prog_name: the program that is running
+    :param args: full list of args
+    :return: the final context/command parsed
+    """
+    ctx = cli.make_context(prog_name, args, resilient_parsing=True)
+    args = ctx.protected_args + ctx.args
+    while args:
+        if isinstance(ctx.command, MultiCommand):
+            if not ctx.command.chain:
+                cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
+                if cmd is None:
+                    return ctx
+                ctx = cmd.make_context(
+                    cmd_name, args, parent=ctx, resilient_parsing=True
+                )
+                args = ctx.protected_args + ctx.args
+            else:
+                # Walk chained subcommand contexts saving the last one.
+                while args:
+                    cmd_name, cmd, args = ctx.command.resolve_command(ctx, args)
+                    if cmd is None:
+                        return ctx
+                    sub_ctx = cmd.make_context(
+                        cmd_name,
+                        args,
+                        parent=ctx,
+                        allow_extra_args=True,
+                        allow_interspersed_args=False,
+                        resilient_parsing=True,
+                    )
+                    args = sub_ctx.args
+                ctx = sub_ctx
+                args = sub_ctx.protected_args + sub_ctx.args
+        else:
+            break
+    return ctx
+
+
+def start_of_option(param_str):
+    """
+    :param param_str: param_str to check
+    :return: whether or not this is the start of an option declaration
+        (i.e. starts "-" or "--")
+    """
+    return param_str and param_str[:1] == "-"
+
+
+def is_incomplete_option(all_args, cmd_param):
+    """
+    :param all_args: the full original list of args supplied
+    :param cmd_param: the current command parameter
+    :return: whether or not the last option declaration (i.e. starts
+        "-" or "--") is incomplete and corresponds to this cmd_param. In
+        other words whether this cmd_param option can still accept
+        values
+    """
+    if not isinstance(cmd_param, Option):
+        return False
+    if cmd_param.is_flag:
+        return False
+    last_option = None
+    for index, arg_str in enumerate(
+        reversed([arg for arg in all_args if arg != WORDBREAK])
+    ):
+        if index + 1 > cmd_param.nargs:
+            break
+        if start_of_option(arg_str):
+            last_option = arg_str
+
+    return True if last_option and last_option in cmd_param.opts else False
+
+
+def is_incomplete_argument(current_params, cmd_param):
+    """
+    :param current_params: the current params and values for this
+        argument as already entered
+    :param cmd_param: the current command parameter
+    :return: whether or not the last argument is incomplete and
+        corresponds to this cmd_param. In other words whether or not
+        this cmd_param argument can still accept values
+    """
+    if not isinstance(cmd_param, Argument):
+        return False
+    current_param_values = current_params[cmd_param.name]
+    if current_param_values is None:
+        return True
+    if cmd_param.nargs == -1:
+        return True
+    if (
+        isinstance(current_param_values, abc.Iterable)
+        and cmd_param.nargs > 1
+        and len(current_param_values) < cmd_param.nargs
+    ):
+        return True
+    return False
+
+
+def get_user_autocompletions(ctx, args, incomplete, cmd_param):
+    """
+    :param ctx: context associated with the parsed command
+    :param args: full list of args
+    :param incomplete: the incomplete text to autocomplete
+    :param cmd_param: command definition
+    :return: all the possible user-specified completions for the param
+    """
+    results = []
+    if isinstance(cmd_param.type, Choice):
+        # Choices don't support descriptions.
+        results = [
+            (c, None) for c in cmd_param.type.choices if str(c).startswith(incomplete)
+        ]
+    elif cmd_param.autocompletion is not None:
+        dynamic_completions = cmd_param.autocompletion(
+            ctx=ctx, args=args, incomplete=incomplete
+        )
+        results = [
+            c if isinstance(c, tuple) else (c, None) for c in dynamic_completions
+        ]
+    return results
+
+
+def get_visible_commands_starting_with(ctx, starts_with):
+    """
+    :param ctx: context associated with the parsed command
+    :param starts_with: string that visible commands must start with.
+    :return: all visible (not hidden) commands that start with starts_with.
+    """
+    for c in ctx.command.list_commands(ctx):
+        if c.startswith(starts_with):
+            command = ctx.command.get_command(ctx, c)
+            if not command.hidden:
+                yield command
+
+
+def add_subcommand_completions(ctx, incomplete, completions_out):
+    # Add subcommand completions.
+    if isinstance(ctx.command, MultiCommand):
+        completions_out.extend(
+            [
+                (c.name, c.get_short_help_str())
+                for c in get_visible_commands_starting_with(ctx, incomplete)
+            ]
+        )
+
+    # Walk up the context list and add any other completion
+    # possibilities from chained commands
+    while ctx.parent is not None:
+        ctx = ctx.parent
+        if isinstance(ctx.command, MultiCommand) and ctx.command.chain:
+            remaining_commands = [
+                c
+                for c in get_visible_commands_starting_with(ctx, incomplete)
+                if c.name not in ctx.protected_args
+            ]
+            completions_out.extend(
+                [(c.name, c.get_short_help_str()) for c in remaining_commands]
+            )
+
+
+def get_choices(cli, prog_name, args, incomplete):
+    """
+    :param cli: command definition
+    :param prog_name: the program that is running
+    :param args: full list of args
+    :param incomplete: the incomplete text to autocomplete
+    :return: all the possible completions for the incomplete
+    """
+    all_args = copy.deepcopy(args)
+
+    ctx = resolve_ctx(cli, prog_name, args)
+    if ctx is None:
+        return []
+
+    has_double_dash = "--" in all_args
+
+    # In newer versions of bash long opts with '='s are partitioned, but
+    # it's easier to parse without the '='
+    if start_of_option(incomplete) and WORDBREAK in incomplete:
+        partition_incomplete = incomplete.partition(WORDBREAK)
+        all_args.append(partition_incomplete[0])
+        incomplete = partition_incomplete[2]
+    elif incomplete == WORDBREAK:
+        incomplete = ""
+
+    completions = []
+    if not has_double_dash and start_of_option(incomplete):
+        # completions for partial options
+        for param in ctx.command.params:
+            if isinstance(param, Option) and not param.hidden:
+                param_opts = [
+                    param_opt
+                    for param_opt in param.opts + param.secondary_opts
+                    if param_opt not in all_args or param.multiple
+                ]
+                completions.extend(
+                    [(o, param.help) for o in param_opts if o.startswith(incomplete)]
+                )
+        return completions
+    # completion for option values from user supplied values
+    for param in ctx.command.params:
+        if is_incomplete_option(all_args, param):
+            return get_user_autocompletions(ctx, all_args, incomplete, param)
+    # completion for argument values from user supplied values
+    for param in ctx.command.params:
+        if is_incomplete_argument(ctx.params, param):
+            return get_user_autocompletions(ctx, all_args, incomplete, param)
+
+    add_subcommand_completions(ctx, incomplete, completions)
+    # Sort before returning so that proper ordering can be enforced in custom types.
+    return sorted(completions)
+
+
+def do_complete(cli, prog_name, include_descriptions):
+    cwords = split_arg_string(os.environ["COMP_WORDS"])
+    cword = int(os.environ["COMP_CWORD"])
+    args = cwords[1:cword]
+    try:
+        incomplete = cwords[cword]
+    except IndexError:
+        incomplete = ""
+
+    for item in get_choices(cli, prog_name, args, incomplete):
+        echo(item[0])
+        if include_descriptions:
+            # ZSH has trouble dealing with empty array parameters when
+            # returned from commands, use '_' to indicate no description
+            # is present.
+            echo(item[1] if item[1] else "_")
+
+    return True
+
+
+def do_complete_fish(cli, prog_name):
+    cwords = split_arg_string(os.environ["COMP_WORDS"])
+    incomplete = os.environ["COMP_CWORD"]
+    args = cwords[1:]
+
+    for item in get_choices(cli, prog_name, args, incomplete):
+        if item[1]:
+            echo("{arg}\t{desc}".format(arg=item[0], desc=item[1]))
+        else:
+            echo(item[0])
+
+    return True
+
+
+def bashcomplete(cli, prog_name, complete_var, complete_instr):
+    if "_" in complete_instr:
+        command, shell = complete_instr.split("_", 1)
+    else:
+        command = complete_instr
+        shell = "bash"
+
+    if command == "source":
+        echo(get_completion_script(prog_name, complete_var, shell))
+        return True
+    elif command == "complete":
+        if shell == "fish":
+            return do_complete_fish(cli, prog_name)
+        elif shell in {"bash", "zsh"}:
+            return do_complete(cli, prog_name, shell == "zsh")
+
+    return False

+ 786 - 0
Lib/site-packages/click/_compat.py

@@ -0,0 +1,786 @@
+# flake8: noqa
+import codecs
+import io
+import os
+import re
+import sys
+from weakref import WeakKeyDictionary
+
+PY2 = sys.version_info[0] == 2
+CYGWIN = sys.platform.startswith("cygwin")
+MSYS2 = sys.platform.startswith("win") and ("GCC" in sys.version)
+# Determine local App Engine environment, per Google's own suggestion
+APP_ENGINE = "APPENGINE_RUNTIME" in os.environ and "Development/" in os.environ.get(
+    "SERVER_SOFTWARE", ""
+)
+WIN = sys.platform.startswith("win") and not APP_ENGINE and not MSYS2
+DEFAULT_COLUMNS = 80
+
+
+_ansi_re = re.compile(r"\033\[[;?0-9]*[a-zA-Z]")
+
+
+def get_filesystem_encoding():
+    return sys.getfilesystemencoding() or sys.getdefaultencoding()
+
+
+def _make_text_stream(
+    stream, encoding, errors, force_readable=False, force_writable=False
+):
+    if encoding is None:
+        encoding = get_best_encoding(stream)
+    if errors is None:
+        errors = "replace"
+    return _NonClosingTextIOWrapper(
+        stream,
+        encoding,
+        errors,
+        line_buffering=True,
+        force_readable=force_readable,
+        force_writable=force_writable,
+    )
+
+
+def is_ascii_encoding(encoding):
+    """Checks if a given encoding is ascii."""
+    try:
+        return codecs.lookup(encoding).name == "ascii"
+    except LookupError:
+        return False
+
+
+def get_best_encoding(stream):
+    """Returns the default stream encoding if not found."""
+    rv = getattr(stream, "encoding", None) or sys.getdefaultencoding()
+    if is_ascii_encoding(rv):
+        return "utf-8"
+    return rv
+
+
+class _NonClosingTextIOWrapper(io.TextIOWrapper):
+    def __init__(
+        self,
+        stream,
+        encoding,
+        errors,
+        force_readable=False,
+        force_writable=False,
+        **extra
+    ):
+        self._stream = stream = _FixupStream(stream, force_readable, force_writable)
+        io.TextIOWrapper.__init__(self, stream, encoding, errors, **extra)
+
+    # The io module is a place where the Python 3 text behavior
+    # was forced upon Python 2, so we need to unbreak
+    # it to look like Python 2.
+    if PY2:
+
+        def write(self, x):
+            if isinstance(x, str) or is_bytes(x):
+                try:
+                    self.flush()
+                except Exception:
+                    pass
+                return self.buffer.write(str(x))
+            return io.TextIOWrapper.write(self, x)
+
+        def writelines(self, lines):
+            for line in lines:
+                self.write(line)
+
+    def __del__(self):
+        try:
+            self.detach()
+        except Exception:
+            pass
+
+    def isatty(self):
+        # https://bitbucket.org/pypy/pypy/issue/1803
+        return self._stream.isatty()
+
+
+class _FixupStream(object):
+    """The new io interface needs more from streams than streams
+    traditionally implement.  As such, this fix-up code is necessary in
+    some circumstances.
+
+    The forcing of readable and writable flags are there because some tools
+    put badly patched objects on sys (one such offender are certain version
+    of jupyter notebook).
+    """
+
+    def __init__(self, stream, force_readable=False, force_writable=False):
+        self._stream = stream
+        self._force_readable = force_readable
+        self._force_writable = force_writable
+
+    def __getattr__(self, name):
+        return getattr(self._stream, name)
+
+    def read1(self, size):
+        f = getattr(self._stream, "read1", None)
+        if f is not None:
+            return f(size)
+        # We only dispatch to readline instead of read in Python 2 as we
+        # do not want to cause problems with the different implementation
+        # of line buffering.
+        if PY2:
+            return self._stream.readline(size)
+        return self._stream.read(size)
+
+    def readable(self):
+        if self._force_readable:
+            return True
+        x = getattr(self._stream, "readable", None)
+        if x is not None:
+            return x()
+        try:
+            self._stream.read(0)
+        except Exception:
+            return False
+        return True
+
+    def writable(self):
+        if self._force_writable:
+            return True
+        x = getattr(self._stream, "writable", None)
+        if x is not None:
+            return x()
+        try:
+            self._stream.write("")
+        except Exception:
+            try:
+                self._stream.write(b"")
+            except Exception:
+                return False
+        return True
+
+    def seekable(self):
+        x = getattr(self._stream, "seekable", None)
+        if x is not None:
+            return x()
+        try:
+            self._stream.seek(self._stream.tell())
+        except Exception:
+            return False
+        return True
+
+
+if PY2:
+    text_type = unicode
+    raw_input = raw_input
+    string_types = (str, unicode)
+    int_types = (int, long)
+    iteritems = lambda x: x.iteritems()
+    range_type = xrange
+
+    def is_bytes(x):
+        return isinstance(x, (buffer, bytearray))
+
+    _identifier_re = re.compile(r"^[a-zA-Z_][a-zA-Z0-9_]*$")
+
+    # For Windows, we need to force stdout/stdin/stderr to binary if it's
+    # fetched for that.  This obviously is not the most correct way to do
+    # it as it changes global state.  Unfortunately, there does not seem to
+    # be a clear better way to do it as just reopening the file in binary
+    # mode does not change anything.
+    #
+    # An option would be to do what Python 3 does and to open the file as
+    # binary only, patch it back to the system, and then use a wrapper
+    # stream that converts newlines.  It's not quite clear what's the
+    # correct option here.
+    #
+    # This code also lives in _winconsole for the fallback to the console
+    # emulation stream.
+    #
+    # There are also Windows environments where the `msvcrt` module is not
+    # available (which is why we use try-catch instead of the WIN variable
+    # here), such as the Google App Engine development server on Windows. In
+    # those cases there is just nothing we can do.
+    def set_binary_mode(f):
+        return f
+
+    try:
+        import msvcrt
+    except ImportError:
+        pass
+    else:
+
+        def set_binary_mode(f):
+            try:
+                fileno = f.fileno()
+            except Exception:
+                pass
+            else:
+                msvcrt.setmode(fileno, os.O_BINARY)
+            return f
+
+    try:
+        import fcntl
+    except ImportError:
+        pass
+    else:
+
+        def set_binary_mode(f):
+            try:
+                fileno = f.fileno()
+            except Exception:
+                pass
+            else:
+                flags = fcntl.fcntl(fileno, fcntl.F_GETFL)
+                fcntl.fcntl(fileno, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
+            return f
+
+    def isidentifier(x):
+        return _identifier_re.search(x) is not None
+
+    def get_binary_stdin():
+        return set_binary_mode(sys.stdin)
+
+    def get_binary_stdout():
+        _wrap_std_stream("stdout")
+        return set_binary_mode(sys.stdout)
+
+    def get_binary_stderr():
+        _wrap_std_stream("stderr")
+        return set_binary_mode(sys.stderr)
+
+    def get_text_stdin(encoding=None, errors=None):
+        rv = _get_windows_console_stream(sys.stdin, encoding, errors)
+        if rv is not None:
+            return rv
+        return _make_text_stream(sys.stdin, encoding, errors, force_readable=True)
+
+    def get_text_stdout(encoding=None, errors=None):
+        _wrap_std_stream("stdout")
+        rv = _get_windows_console_stream(sys.stdout, encoding, errors)
+        if rv is not None:
+            return rv
+        return _make_text_stream(sys.stdout, encoding, errors, force_writable=True)
+
+    def get_text_stderr(encoding=None, errors=None):
+        _wrap_std_stream("stderr")
+        rv = _get_windows_console_stream(sys.stderr, encoding, errors)
+        if rv is not None:
+            return rv
+        return _make_text_stream(sys.stderr, encoding, errors, force_writable=True)
+
+    def filename_to_ui(value):
+        if isinstance(value, bytes):
+            value = value.decode(get_filesystem_encoding(), "replace")
+        return value
+
+
+else:
+    import io
+
+    text_type = str
+    raw_input = input
+    string_types = (str,)
+    int_types = (int,)
+    range_type = range
+    isidentifier = lambda x: x.isidentifier()
+    iteritems = lambda x: iter(x.items())
+
+    def is_bytes(x):
+        return isinstance(x, (bytes, memoryview, bytearray))
+
+    def _is_binary_reader(stream, default=False):
+        try:
+            return isinstance(stream.read(0), bytes)
+        except Exception:
+            return default
+            # This happens in some cases where the stream was already
+            # closed.  In this case, we assume the default.
+
+    def _is_binary_writer(stream, default=False):
+        try:
+            stream.write(b"")
+        except Exception:
+            try:
+                stream.write("")
+                return False
+            except Exception:
+                pass
+            return default
+        return True
+
+    def _find_binary_reader(stream):
+        # We need to figure out if the given stream is already binary.
+        # This can happen because the official docs recommend detaching
+        # the streams to get binary streams.  Some code might do this, so
+        # we need to deal with this case explicitly.
+        if _is_binary_reader(stream, False):
+            return stream
+
+        buf = getattr(stream, "buffer", None)
+
+        # Same situation here; this time we assume that the buffer is
+        # actually binary in case it's closed.
+        if buf is not None and _is_binary_reader(buf, True):
+            return buf
+
+    def _find_binary_writer(stream):
+        # We need to figure out if the given stream is already binary.
+        # This can happen because the official docs recommend detaching
+        # the streams to get binary streams.  Some code might do this, so
+        # we need to deal with this case explicitly.
+        if _is_binary_writer(stream, False):
+            return stream
+
+        buf = getattr(stream, "buffer", None)
+
+        # Same situation here; this time we assume that the buffer is
+        # actually binary in case it's closed.
+        if buf is not None and _is_binary_writer(buf, True):
+            return buf
+
+    def _stream_is_misconfigured(stream):
+        """A stream is misconfigured if its encoding is ASCII."""
+        # If the stream does not have an encoding set, we assume it's set
+        # to ASCII.  This appears to happen in certain unittest
+        # environments.  It's not quite clear what the correct behavior is
+        # but this at least will force Click to recover somehow.
+        return is_ascii_encoding(getattr(stream, "encoding", None) or "ascii")
+
+    def _is_compat_stream_attr(stream, attr, value):
+        """A stream attribute is compatible if it is equal to the
+        desired value or the desired value is unset and the attribute
+        has a value.
+        """
+        stream_value = getattr(stream, attr, None)
+        return stream_value == value or (value is None and stream_value is not None)
+
+    def _is_compatible_text_stream(stream, encoding, errors):
+        """Check if a stream's encoding and errors attributes are
+        compatible with the desired values.
+        """
+        return _is_compat_stream_attr(
+            stream, "encoding", encoding
+        ) and _is_compat_stream_attr(stream, "errors", errors)
+
+    def _force_correct_text_stream(
+        text_stream,
+        encoding,
+        errors,
+        is_binary,
+        find_binary,
+        force_readable=False,
+        force_writable=False,
+    ):
+        if is_binary(text_stream, False):
+            binary_reader = text_stream
+        else:
+            # If the stream looks compatible, and won't default to a
+            # misconfigured ascii encoding, return it as-is.
+            if _is_compatible_text_stream(text_stream, encoding, errors) and not (
+                encoding is None and _stream_is_misconfigured(text_stream)
+            ):
+                return text_stream
+
+            # Otherwise, get the underlying binary reader.
+            binary_reader = find_binary(text_stream)
+
+            # If that's not possible, silently use the original reader
+            # and get mojibake instead of exceptions.
+            if binary_reader is None:
+                return text_stream
+
+        # Default errors to replace instead of strict in order to get
+        # something that works.
+        if errors is None:
+            errors = "replace"
+
+        # Wrap the binary stream in a text stream with the correct
+        # encoding parameters.
+        return _make_text_stream(
+            binary_reader,
+            encoding,
+            errors,
+            force_readable=force_readable,
+            force_writable=force_writable,
+        )
+
+    def _force_correct_text_reader(text_reader, encoding, errors, force_readable=False):
+        return _force_correct_text_stream(
+            text_reader,
+            encoding,
+            errors,
+            _is_binary_reader,
+            _find_binary_reader,
+            force_readable=force_readable,
+        )
+
+    def _force_correct_text_writer(text_writer, encoding, errors, force_writable=False):
+        return _force_correct_text_stream(
+            text_writer,
+            encoding,
+            errors,
+            _is_binary_writer,
+            _find_binary_writer,
+            force_writable=force_writable,
+        )
+
+    def get_binary_stdin():
+        reader = _find_binary_reader(sys.stdin)
+        if reader is None:
+            raise RuntimeError("Was not able to determine binary stream for sys.stdin.")
+        return reader
+
+    def get_binary_stdout():
+        writer = _find_binary_writer(sys.stdout)
+        if writer is None:
+            raise RuntimeError(
+                "Was not able to determine binary stream for sys.stdout."
+            )
+        return writer
+
+    def get_binary_stderr():
+        writer = _find_binary_writer(sys.stderr)
+        if writer is None:
+            raise RuntimeError(
+                "Was not able to determine binary stream for sys.stderr."
+            )
+        return writer
+
+    def get_text_stdin(encoding=None, errors=None):
+        rv = _get_windows_console_stream(sys.stdin, encoding, errors)
+        if rv is not None:
+            return rv
+        return _force_correct_text_reader(
+            sys.stdin, encoding, errors, force_readable=True
+        )
+
+    def get_text_stdout(encoding=None, errors=None):
+        rv = _get_windows_console_stream(sys.stdout, encoding, errors)
+        if rv is not None:
+            return rv
+        return _force_correct_text_writer(
+            sys.stdout, encoding, errors, force_writable=True
+        )
+
+    def get_text_stderr(encoding=None, errors=None):
+        rv = _get_windows_console_stream(sys.stderr, encoding, errors)
+        if rv is not None:
+            return rv
+        return _force_correct_text_writer(
+            sys.stderr, encoding, errors, force_writable=True
+        )
+
+    def filename_to_ui(value):
+        if isinstance(value, bytes):
+            value = value.decode(get_filesystem_encoding(), "replace")
+        else:
+            value = value.encode("utf-8", "surrogateescape").decode("utf-8", "replace")
+        return value
+
+
+def get_streerror(e, default=None):
+    if hasattr(e, "strerror"):
+        msg = e.strerror
+    else:
+        if default is not None:
+            msg = default
+        else:
+            msg = str(e)
+    if isinstance(msg, bytes):
+        msg = msg.decode("utf-8", "replace")
+    return msg
+
+
+def _wrap_io_open(file, mode, encoding, errors):
+    """On Python 2, :func:`io.open` returns a text file wrapper that
+    requires passing ``unicode`` to ``write``. Need to open the file in
+    binary mode then wrap it in a subclass that can write ``str`` and
+    ``unicode``.
+
+    Also handles not passing ``encoding`` and ``errors`` in binary mode.
+    """
+    binary = "b" in mode
+
+    if binary:
+        kwargs = {}
+    else:
+        kwargs = {"encoding": encoding, "errors": errors}
+
+    if not PY2 or binary:
+        return io.open(file, mode, **kwargs)
+
+    f = io.open(file, "{}b".format(mode.replace("t", "")))
+    return _make_text_stream(f, **kwargs)
+
+
+def open_stream(filename, mode="r", encoding=None, errors="strict", atomic=False):
+    binary = "b" in mode
+
+    # Standard streams first.  These are simple because they don't need
+    # special handling for the atomic flag.  It's entirely ignored.
+    if filename == "-":
+        if any(m in mode for m in ["w", "a", "x"]):
+            if binary:
+                return get_binary_stdout(), False
+            return get_text_stdout(encoding=encoding, errors=errors), False
+        if binary:
+            return get_binary_stdin(), False
+        return get_text_stdin(encoding=encoding, errors=errors), False
+
+    # Non-atomic writes directly go out through the regular open functions.
+    if not atomic:
+        return _wrap_io_open(filename, mode, encoding, errors), True
+
+    # Some usability stuff for atomic writes
+    if "a" in mode:
+        raise ValueError(
+            "Appending to an existing file is not supported, because that"
+            " would involve an expensive `copy`-operation to a temporary"
+            " file. Open the file in normal `w`-mode and copy explicitly"
+            " if that's what you're after."
+        )
+    if "x" in mode:
+        raise ValueError("Use the `overwrite`-parameter instead.")
+    if "w" not in mode:
+        raise ValueError("Atomic writes only make sense with `w`-mode.")
+
+    # Atomic writes are more complicated.  They work by opening a file
+    # as a proxy in the same folder and then using the fdopen
+    # functionality to wrap it in a Python file.  Then we wrap it in an
+    # atomic file that moves the file over on close.
+    import errno
+    import random
+
+    try:
+        perm = os.stat(filename).st_mode
+    except OSError:
+        perm = None
+
+    flags = os.O_RDWR | os.O_CREAT | os.O_EXCL
+
+    if binary:
+        flags |= getattr(os, "O_BINARY", 0)
+
+    while True:
+        tmp_filename = os.path.join(
+            os.path.dirname(filename),
+            ".__atomic-write{:08x}".format(random.randrange(1 << 32)),
+        )
+        try:
+            fd = os.open(tmp_filename, flags, 0o666 if perm is None else perm)
+            break
+        except OSError as e:
+            if e.errno == errno.EEXIST or (
+                os.name == "nt"
+                and e.errno == errno.EACCES
+                and os.path.isdir(e.filename)
+                and os.access(e.filename, os.W_OK)
+            ):
+                continue
+            raise
+
+    if perm is not None:
+        os.chmod(tmp_filename, perm)  # in case perm includes bits in umask
+
+    f = _wrap_io_open(fd, mode, encoding, errors)
+    return _AtomicFile(f, tmp_filename, os.path.realpath(filename)), True
+
+
+# Used in a destructor call, needs extra protection from interpreter cleanup.
+if hasattr(os, "replace"):
+    _replace = os.replace
+    _can_replace = True
+else:
+    _replace = os.rename
+    _can_replace = not WIN
+
+
+class _AtomicFile(object):
+    def __init__(self, f, tmp_filename, real_filename):
+        self._f = f
+        self._tmp_filename = tmp_filename
+        self._real_filename = real_filename
+        self.closed = False
+
+    @property
+    def name(self):
+        return self._real_filename
+
+    def close(self, delete=False):
+        if self.closed:
+            return
+        self._f.close()
+        if not _can_replace:
+            try:
+                os.remove(self._real_filename)
+            except OSError:
+                pass
+        _replace(self._tmp_filename, self._real_filename)
+        self.closed = True
+
+    def __getattr__(self, name):
+        return getattr(self._f, name)
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        self.close(delete=exc_type is not None)
+
+    def __repr__(self):
+        return repr(self._f)
+
+
+auto_wrap_for_ansi = None
+colorama = None
+get_winterm_size = None
+
+
+def strip_ansi(value):
+    return _ansi_re.sub("", value)
+
+
+def _is_jupyter_kernel_output(stream):
+    if WIN:
+        # TODO: Couldn't test on Windows, shouldn't try to support until
+        # someone tests the details wrt colorama.
+        return
+
+    while isinstance(stream, (_FixupStream, _NonClosingTextIOWrapper)):
+        stream = stream._stream
+
+    return stream.__class__.__module__.startswith("ipykernel.")
+
+
+def should_strip_ansi(stream=None, color=None):
+    if color is None:
+        if stream is None:
+            stream = sys.stdin
+        return not isatty(stream) and not _is_jupyter_kernel_output(stream)
+    return not color
+
+
+# If we're on Windows, we provide transparent integration through
+# colorama.  This will make ANSI colors through the echo function
+# work automatically.
+if WIN:
+    # Windows has a smaller terminal
+    DEFAULT_COLUMNS = 79
+
+    from ._winconsole import _get_windows_console_stream, _wrap_std_stream
+
+    def _get_argv_encoding():
+        import locale
+
+        return locale.getpreferredencoding()
+
+    if PY2:
+
+        def raw_input(prompt=""):
+            sys.stderr.flush()
+            if prompt:
+                stdout = _default_text_stdout()
+                stdout.write(prompt)
+            stdin = _default_text_stdin()
+            return stdin.readline().rstrip("\r\n")
+
+    try:
+        import colorama
+    except ImportError:
+        pass
+    else:
+        _ansi_stream_wrappers = WeakKeyDictionary()
+
+        def auto_wrap_for_ansi(stream, color=None):
+            """This function wraps a stream so that calls through colorama
+            are issued to the win32 console API to recolor on demand.  It
+            also ensures to reset the colors if a write call is interrupted
+            to not destroy the console afterwards.
+            """
+            try:
+                cached = _ansi_stream_wrappers.get(stream)
+            except Exception:
+                cached = None
+            if cached is not None:
+                return cached
+            strip = should_strip_ansi(stream, color)
+            ansi_wrapper = colorama.AnsiToWin32(stream, strip=strip)
+            rv = ansi_wrapper.stream
+            _write = rv.write
+
+            def _safe_write(s):
+                try:
+                    return _write(s)
+                except:
+                    ansi_wrapper.reset_all()
+                    raise
+
+            rv.write = _safe_write
+            try:
+                _ansi_stream_wrappers[stream] = rv
+            except Exception:
+                pass
+            return rv
+
+        def get_winterm_size():
+            win = colorama.win32.GetConsoleScreenBufferInfo(
+                colorama.win32.STDOUT
+            ).srWindow
+            return win.Right - win.Left, win.Bottom - win.Top
+
+
+else:
+
+    def _get_argv_encoding():
+        return getattr(sys.stdin, "encoding", None) or get_filesystem_encoding()
+
+    _get_windows_console_stream = lambda *x: None
+    _wrap_std_stream = lambda *x: None
+
+
+def term_len(x):
+    return len(strip_ansi(x))
+
+
+def isatty(stream):
+    try:
+        return stream.isatty()
+    except Exception:
+        return False
+
+
+def _make_cached_stream_func(src_func, wrapper_func):
+    cache = WeakKeyDictionary()
+
+    def func():
+        stream = src_func()
+        try:
+            rv = cache.get(stream)
+        except Exception:
+            rv = None
+        if rv is not None:
+            return rv
+        rv = wrapper_func()
+        try:
+            stream = src_func()  # In case wrapper_func() modified the stream
+            cache[stream] = rv
+        except Exception:
+            pass
+        return rv
+
+    return func
+
+
+_default_text_stdin = _make_cached_stream_func(lambda: sys.stdin, get_text_stdin)
+_default_text_stdout = _make_cached_stream_func(lambda: sys.stdout, get_text_stdout)
+_default_text_stderr = _make_cached_stream_func(lambda: sys.stderr, get_text_stderr)
+
+
+binary_streams = {
+    "stdin": get_binary_stdin,
+    "stdout": get_binary_stdout,
+    "stderr": get_binary_stderr,
+}
+
+text_streams = {
+    "stdin": get_text_stdin,
+    "stdout": get_text_stdout,
+    "stderr": get_text_stderr,
+}

+ 657 - 0
Lib/site-packages/click/_termui_impl.py

@@ -0,0 +1,657 @@
+# -*- coding: utf-8 -*-
+"""
+This module contains implementations for the termui module. To keep the
+import time of Click down, some infrequently used functionality is
+placed in this module and only imported as needed.
+"""
+import contextlib
+import math
+import os
+import sys
+import time
+
+from ._compat import _default_text_stdout
+from ._compat import CYGWIN
+from ._compat import get_best_encoding
+from ._compat import int_types
+from ._compat import isatty
+from ._compat import open_stream
+from ._compat import range_type
+from ._compat import strip_ansi
+from ._compat import term_len
+from ._compat import WIN
+from .exceptions import ClickException
+from .utils import echo
+
+if os.name == "nt":
+    BEFORE_BAR = "\r"
+    AFTER_BAR = "\n"
+else:
+    BEFORE_BAR = "\r\033[?25l"
+    AFTER_BAR = "\033[?25h\n"
+
+
+def _length_hint(obj):
+    """Returns the length hint of an object."""
+    try:
+        return len(obj)
+    except (AttributeError, TypeError):
+        try:
+            get_hint = type(obj).__length_hint__
+        except AttributeError:
+            return None
+        try:
+            hint = get_hint(obj)
+        except TypeError:
+            return None
+        if hint is NotImplemented or not isinstance(hint, int_types) or hint < 0:
+            return None
+        return hint
+
+
+class ProgressBar(object):
+    def __init__(
+        self,
+        iterable,
+        length=None,
+        fill_char="#",
+        empty_char=" ",
+        bar_template="%(bar)s",
+        info_sep="  ",
+        show_eta=True,
+        show_percent=None,
+        show_pos=False,
+        item_show_func=None,
+        label=None,
+        file=None,
+        color=None,
+        width=30,
+    ):
+        self.fill_char = fill_char
+        self.empty_char = empty_char
+        self.bar_template = bar_template
+        self.info_sep = info_sep
+        self.show_eta = show_eta
+        self.show_percent = show_percent
+        self.show_pos = show_pos
+        self.item_show_func = item_show_func
+        self.label = label or ""
+        if file is None:
+            file = _default_text_stdout()
+        self.file = file
+        self.color = color
+        self.width = width
+        self.autowidth = width == 0
+
+        if length is None:
+            length = _length_hint(iterable)
+        if iterable is None:
+            if length is None:
+                raise TypeError("iterable or length is required")
+            iterable = range_type(length)
+        self.iter = iter(iterable)
+        self.length = length
+        self.length_known = length is not None
+        self.pos = 0
+        self.avg = []
+        self.start = self.last_eta = time.time()
+        self.eta_known = False
+        self.finished = False
+        self.max_width = None
+        self.entered = False
+        self.current_item = None
+        self.is_hidden = not isatty(self.file)
+        self._last_line = None
+        self.short_limit = 0.5
+
+    def __enter__(self):
+        self.entered = True
+        self.render_progress()
+        return self
+
+    def __exit__(self, exc_type, exc_value, tb):
+        self.render_finish()
+
+    def __iter__(self):
+        if not self.entered:
+            raise RuntimeError("You need to use progress bars in a with block.")
+        self.render_progress()
+        return self.generator()
+
+    def __next__(self):
+        # Iteration is defined in terms of a generator function,
+        # returned by iter(self); use that to define next(). This works
+        # because `self.iter` is an iterable consumed by that generator,
+        # so it is re-entry safe. Calling `next(self.generator())`
+        # twice works and does "what you want".
+        return next(iter(self))
+
+    # Python 2 compat
+    next = __next__
+
+    def is_fast(self):
+        return time.time() - self.start <= self.short_limit
+
+    def render_finish(self):
+        if self.is_hidden or self.is_fast():
+            return
+        self.file.write(AFTER_BAR)
+        self.file.flush()
+
+    @property
+    def pct(self):
+        if self.finished:
+            return 1.0
+        return min(self.pos / (float(self.length) or 1), 1.0)
+
+    @property
+    def time_per_iteration(self):
+        if not self.avg:
+            return 0.0
+        return sum(self.avg) / float(len(self.avg))
+
+    @property
+    def eta(self):
+        if self.length_known and not self.finished:
+            return self.time_per_iteration * (self.length - self.pos)
+        return 0.0
+
+    def format_eta(self):
+        if self.eta_known:
+            t = int(self.eta)
+            seconds = t % 60
+            t //= 60
+            minutes = t % 60
+            t //= 60
+            hours = t % 24
+            t //= 24
+            if t > 0:
+                return "{}d {:02}:{:02}:{:02}".format(t, hours, minutes, seconds)
+            else:
+                return "{:02}:{:02}:{:02}".format(hours, minutes, seconds)
+        return ""
+
+    def format_pos(self):
+        pos = str(self.pos)
+        if self.length_known:
+            pos += "/{}".format(self.length)
+        return pos
+
+    def format_pct(self):
+        return "{: 4}%".format(int(self.pct * 100))[1:]
+
+    def format_bar(self):
+        if self.length_known:
+            bar_length = int(self.pct * self.width)
+            bar = self.fill_char * bar_length
+            bar += self.empty_char * (self.width - bar_length)
+        elif self.finished:
+            bar = self.fill_char * self.width
+        else:
+            bar = list(self.empty_char * (self.width or 1))
+            if self.time_per_iteration != 0:
+                bar[
+                    int(
+                        (math.cos(self.pos * self.time_per_iteration) / 2.0 + 0.5)
+                        * self.width
+                    )
+                ] = self.fill_char
+            bar = "".join(bar)
+        return bar
+
+    def format_progress_line(self):
+        show_percent = self.show_percent
+
+        info_bits = []
+        if self.length_known and show_percent is None:
+            show_percent = not self.show_pos
+
+        if self.show_pos:
+            info_bits.append(self.format_pos())
+        if show_percent:
+            info_bits.append(self.format_pct())
+        if self.show_eta and self.eta_known and not self.finished:
+            info_bits.append(self.format_eta())
+        if self.item_show_func is not None:
+            item_info = self.item_show_func(self.current_item)
+            if item_info is not None:
+                info_bits.append(item_info)
+
+        return (
+            self.bar_template
+            % {
+                "label": self.label,
+                "bar": self.format_bar(),
+                "info": self.info_sep.join(info_bits),
+            }
+        ).rstrip()
+
+    def render_progress(self):
+        from .termui import get_terminal_size
+
+        if self.is_hidden:
+            return
+
+        buf = []
+        # Update width in case the terminal has been resized
+        if self.autowidth:
+            old_width = self.width
+            self.width = 0
+            clutter_length = term_len(self.format_progress_line())
+            new_width = max(0, get_terminal_size()[0] - clutter_length)
+            if new_width < old_width:
+                buf.append(BEFORE_BAR)
+                buf.append(" " * self.max_width)
+                self.max_width = new_width
+            self.width = new_width
+
+        clear_width = self.width
+        if self.max_width is not None:
+            clear_width = self.max_width
+
+        buf.append(BEFORE_BAR)
+        line = self.format_progress_line()
+        line_len = term_len(line)
+        if self.max_width is None or self.max_width < line_len:
+            self.max_width = line_len
+
+        buf.append(line)
+        buf.append(" " * (clear_width - line_len))
+        line = "".join(buf)
+        # Render the line only if it changed.
+
+        if line != self._last_line and not self.is_fast():
+            self._last_line = line
+            echo(line, file=self.file, color=self.color, nl=False)
+            self.file.flush()
+
+    def make_step(self, n_steps):
+        self.pos += n_steps
+        if self.length_known and self.pos >= self.length:
+            self.finished = True
+
+        if (time.time() - self.last_eta) < 1.0:
+            return
+
+        self.last_eta = time.time()
+
+        # self.avg is a rolling list of length <= 7 of steps where steps are
+        # defined as time elapsed divided by the total progress through
+        # self.length.
+        if self.pos:
+            step = (time.time() - self.start) / self.pos
+        else:
+            step = time.time() - self.start
+
+        self.avg = self.avg[-6:] + [step]
+
+        self.eta_known = self.length_known
+
+    def update(self, n_steps):
+        self.make_step(n_steps)
+        self.render_progress()
+
+    def finish(self):
+        self.eta_known = 0
+        self.current_item = None
+        self.finished = True
+
+    def generator(self):
+        """Return a generator which yields the items added to the bar
+        during construction, and updates the progress bar *after* the
+        yielded block returns.
+        """
+        # WARNING: the iterator interface for `ProgressBar` relies on
+        # this and only works because this is a simple generator which
+        # doesn't create or manage additional state. If this function
+        # changes, the impact should be evaluated both against
+        # `iter(bar)` and `next(bar)`. `next()` in particular may call
+        # `self.generator()` repeatedly, and this must remain safe in
+        # order for that interface to work.
+        if not self.entered:
+            raise RuntimeError("You need to use progress bars in a with block.")
+
+        if self.is_hidden:
+            for rv in self.iter:
+                yield rv
+        else:
+            for rv in self.iter:
+                self.current_item = rv
+                yield rv
+                self.update(1)
+            self.finish()
+            self.render_progress()
+
+
+def pager(generator, color=None):
+    """Decide what method to use for paging through text."""
+    stdout = _default_text_stdout()
+    if not isatty(sys.stdin) or not isatty(stdout):
+        return _nullpager(stdout, generator, color)
+    pager_cmd = (os.environ.get("PAGER", None) or "").strip()
+    if pager_cmd:
+        if WIN:
+            return _tempfilepager(generator, pager_cmd, color)
+        return _pipepager(generator, pager_cmd, color)
+    if os.environ.get("TERM") in ("dumb", "emacs"):
+        return _nullpager(stdout, generator, color)
+    if WIN or sys.platform.startswith("os2"):
+        return _tempfilepager(generator, "more <", color)
+    if hasattr(os, "system") and os.system("(less) 2>/dev/null") == 0:
+        return _pipepager(generator, "less", color)
+
+    import tempfile
+
+    fd, filename = tempfile.mkstemp()
+    os.close(fd)
+    try:
+        if hasattr(os, "system") and os.system('more "{}"'.format(filename)) == 0:
+            return _pipepager(generator, "more", color)
+        return _nullpager(stdout, generator, color)
+    finally:
+        os.unlink(filename)
+
+
+def _pipepager(generator, cmd, color):
+    """Page through text by feeding it to another program.  Invoking a
+    pager through this might support colors.
+    """
+    import subprocess
+
+    env = dict(os.environ)
+
+    # If we're piping to less we might support colors under the
+    # condition that less is invoked with (or configured for) the
+    # -r/-R flags, which make it pass raw ANSI codes through.
+    cmd_detail = cmd.rsplit("/", 1)[-1].split()
+    if color is None and cmd_detail[0] == "less":
+        less_flags = "{}{}".format(os.environ.get("LESS", ""), " ".join(cmd_detail[1:]))
+        if not less_flags:
+            env["LESS"] = "-R"
+            color = True
+        elif "r" in less_flags or "R" in less_flags:
+            color = True
+
+    c = subprocess.Popen(cmd, shell=True, stdin=subprocess.PIPE, env=env)
+    encoding = get_best_encoding(c.stdin)
+    try:
+        for text in generator:
+            if not color:
+                text = strip_ansi(text)
+
+            c.stdin.write(text.encode(encoding, "replace"))
+    except (IOError, KeyboardInterrupt):
+        pass
+    else:
+        c.stdin.close()
+
+    # Less doesn't respect ^C, but catches it for its own UI purposes (aborting
+    # search or other commands inside less).
+    #
+    # That means when the user hits ^C, the parent process (click) terminates,
+    # but less is still alive, paging the output and messing up the terminal.
+    #
+    # If the user wants to make the pager exit on ^C, they should set
+    # `LESS='-K'`. It's not our decision to make.
+    while True:
+        try:
+            c.wait()
+        except KeyboardInterrupt:
+            pass
+        else:
+            break
+
+
+def _tempfilepager(generator, cmd, color):
+    """Page through text by invoking a program on a temporary file."""
+    import tempfile
+
+    filename = tempfile.mktemp()
+    # TODO: This never terminates if the passed generator never terminates.
+    text = "".join(generator)
+    if not color:
+        text = strip_ansi(text)
+    encoding = get_best_encoding(sys.stdout)
+    with open_stream(filename, "wb")[0] as f:
+        f.write(text.encode(encoding))
+    try:
+        os.system('{} "{}"'.format(cmd, filename))
+    finally:
+        os.unlink(filename)
+
+
+def _nullpager(stream, generator, color):
+    """Simply print unformatted text.  This is the ultimate fallback."""
+    for text in generator:
+        if not color:
+            text = strip_ansi(text)
+        stream.write(text)
+
+
+class Editor(object):
+    def __init__(self, editor=None, env=None, require_save=True, extension=".txt"):
+        self.editor = editor
+        self.env = env
+        self.require_save = require_save
+        self.extension = extension
+
+    def get_editor(self):
+        if self.editor is not None:
+            return self.editor
+        for key in "VISUAL", "EDITOR":
+            rv = os.environ.get(key)
+            if rv:
+                return rv
+        if WIN:
+            return "notepad"
+        for editor in "sensible-editor", "vim", "nano":
+            if os.system("which {} >/dev/null 2>&1".format(editor)) == 0:
+                return editor
+        return "vi"
+
+    def edit_file(self, filename):
+        import subprocess
+
+        editor = self.get_editor()
+        if self.env:
+            environ = os.environ.copy()
+            environ.update(self.env)
+        else:
+            environ = None
+        try:
+            c = subprocess.Popen(
+                '{} "{}"'.format(editor, filename), env=environ, shell=True,
+            )
+            exit_code = c.wait()
+            if exit_code != 0:
+                raise ClickException("{}: Editing failed!".format(editor))
+        except OSError as e:
+            raise ClickException("{}: Editing failed: {}".format(editor, e))
+
+    def edit(self, text):
+        import tempfile
+
+        text = text or ""
+        if text and not text.endswith("\n"):
+            text += "\n"
+
+        fd, name = tempfile.mkstemp(prefix="editor-", suffix=self.extension)
+        try:
+            if WIN:
+                encoding = "utf-8-sig"
+                text = text.replace("\n", "\r\n")
+            else:
+                encoding = "utf-8"
+            text = text.encode(encoding)
+
+            f = os.fdopen(fd, "wb")
+            f.write(text)
+            f.close()
+            timestamp = os.path.getmtime(name)
+
+            self.edit_file(name)
+
+            if self.require_save and os.path.getmtime(name) == timestamp:
+                return None
+
+            f = open(name, "rb")
+            try:
+                rv = f.read()
+            finally:
+                f.close()
+            return rv.decode("utf-8-sig").replace("\r\n", "\n")
+        finally:
+            os.unlink(name)
+
+
+def open_url(url, wait=False, locate=False):
+    import subprocess
+
+    def _unquote_file(url):
+        try:
+            import urllib
+        except ImportError:
+            import urllib
+        if url.startswith("file://"):
+            url = urllib.unquote(url[7:])
+        return url
+
+    if sys.platform == "darwin":
+        args = ["open"]
+        if wait:
+            args.append("-W")
+        if locate:
+            args.append("-R")
+        args.append(_unquote_file(url))
+        null = open("/dev/null", "w")
+        try:
+            return subprocess.Popen(args, stderr=null).wait()
+        finally:
+            null.close()
+    elif WIN:
+        if locate:
+            url = _unquote_file(url)
+            args = 'explorer /select,"{}"'.format(_unquote_file(url.replace('"', "")))
+        else:
+            args = 'start {} "" "{}"'.format(
+                "/WAIT" if wait else "", url.replace('"', "")
+            )
+        return os.system(args)
+    elif CYGWIN:
+        if locate:
+            url = _unquote_file(url)
+            args = 'cygstart "{}"'.format(os.path.dirname(url).replace('"', ""))
+        else:
+            args = 'cygstart {} "{}"'.format("-w" if wait else "", url.replace('"', ""))
+        return os.system(args)
+
+    try:
+        if locate:
+            url = os.path.dirname(_unquote_file(url)) or "."
+        else:
+            url = _unquote_file(url)
+        c = subprocess.Popen(["xdg-open", url])
+        if wait:
+            return c.wait()
+        return 0
+    except OSError:
+        if url.startswith(("http://", "https://")) and not locate and not wait:
+            import webbrowser
+
+            webbrowser.open(url)
+            return 0
+        return 1
+
+
+def _translate_ch_to_exc(ch):
+    if ch == u"\x03":
+        raise KeyboardInterrupt()
+    if ch == u"\x04" and not WIN:  # Unix-like, Ctrl+D
+        raise EOFError()
+    if ch == u"\x1a" and WIN:  # Windows, Ctrl+Z
+        raise EOFError()
+
+
+if WIN:
+    import msvcrt
+
+    @contextlib.contextmanager
+    def raw_terminal():
+        yield
+
+    def getchar(echo):
+        # The function `getch` will return a bytes object corresponding to
+        # the pressed character. Since Windows 10 build 1803, it will also
+        # return \x00 when called a second time after pressing a regular key.
+        #
+        # `getwch` does not share this probably-bugged behavior. Moreover, it
+        # returns a Unicode object by default, which is what we want.
+        #
+        # Either of these functions will return \x00 or \xe0 to indicate
+        # a special key, and you need to call the same function again to get
+        # the "rest" of the code. The fun part is that \u00e0 is
+        # "latin small letter a with grave", so if you type that on a French
+        # keyboard, you _also_ get a \xe0.
+        # E.g., consider the Up arrow. This returns \xe0 and then \x48. The
+        # resulting Unicode string reads as "a with grave" + "capital H".
+        # This is indistinguishable from when the user actually types
+        # "a with grave" and then "capital H".
+        #
+        # When \xe0 is returned, we assume it's part of a special-key sequence
+        # and call `getwch` again, but that means that when the user types
+        # the \u00e0 character, `getchar` doesn't return until a second
+        # character is typed.
+        # The alternative is returning immediately, but that would mess up
+        # cross-platform handling of arrow keys and others that start with
+        # \xe0. Another option is using `getch`, but then we can't reliably
+        # read non-ASCII characters, because return values of `getch` are
+        # limited to the current 8-bit codepage.
+        #
+        # Anyway, Click doesn't claim to do this Right(tm), and using `getwch`
+        # is doing the right thing in more situations than with `getch`.
+        if echo:
+            func = msvcrt.getwche
+        else:
+            func = msvcrt.getwch
+
+        rv = func()
+        if rv in (u"\x00", u"\xe0"):
+            # \x00 and \xe0 are control characters that indicate special key,
+            # see above.
+            rv += func()
+        _translate_ch_to_exc(rv)
+        return rv
+
+
+else:
+    import tty
+    import termios
+
+    @contextlib.contextmanager
+    def raw_terminal():
+        if not isatty(sys.stdin):
+            f = open("/dev/tty")
+            fd = f.fileno()
+        else:
+            fd = sys.stdin.fileno()
+            f = None
+        try:
+            old_settings = termios.tcgetattr(fd)
+            try:
+                tty.setraw(fd)
+                yield fd
+            finally:
+                termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
+                sys.stdout.flush()
+                if f is not None:
+                    f.close()
+        except termios.error:
+            pass
+
+    def getchar(echo):
+        with raw_terminal() as fd:
+            ch = os.read(fd, 32)
+            ch = ch.decode(get_best_encoding(sys.stdin), "replace")
+            if echo and isatty(sys.stdout):
+                sys.stdout.write(ch)
+            _translate_ch_to_exc(ch)
+            return ch

+ 37 - 0
Lib/site-packages/click/_textwrap.py

@@ -0,0 +1,37 @@
+import textwrap
+from contextlib import contextmanager
+
+
+class TextWrapper(textwrap.TextWrapper):
+    def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
+        space_left = max(width - cur_len, 1)
+
+        if self.break_long_words:
+            last = reversed_chunks[-1]
+            cut = last[:space_left]
+            res = last[space_left:]
+            cur_line.append(cut)
+            reversed_chunks[-1] = res
+        elif not cur_line:
+            cur_line.append(reversed_chunks.pop())
+
+    @contextmanager
+    def extra_indent(self, indent):
+        old_initial_indent = self.initial_indent
+        old_subsequent_indent = self.subsequent_indent
+        self.initial_indent += indent
+        self.subsequent_indent += indent
+        try:
+            yield
+        finally:
+            self.initial_indent = old_initial_indent
+            self.subsequent_indent = old_subsequent_indent
+
+    def indent_only(self, text):
+        rv = []
+        for idx, line in enumerate(text.splitlines()):
+            indent = self.initial_indent
+            if idx > 0:
+                indent = self.subsequent_indent
+            rv.append(indent + line)
+        return "\n".join(rv)

+ 131 - 0
Lib/site-packages/click/_unicodefun.py

@@ -0,0 +1,131 @@
+import codecs
+import os
+import sys
+
+from ._compat import PY2
+
+
+def _find_unicode_literals_frame():
+    import __future__
+
+    if not hasattr(sys, "_getframe"):  # not all Python implementations have it
+        return 0
+    frm = sys._getframe(1)
+    idx = 1
+    while frm is not None:
+        if frm.f_globals.get("__name__", "").startswith("click."):
+            frm = frm.f_back
+            idx += 1
+        elif frm.f_code.co_flags & __future__.unicode_literals.compiler_flag:
+            return idx
+        else:
+            break
+    return 0
+
+
+def _check_for_unicode_literals():
+    if not __debug__:
+        return
+
+    from . import disable_unicode_literals_warning
+
+    if not PY2 or disable_unicode_literals_warning:
+        return
+    bad_frame = _find_unicode_literals_frame()
+    if bad_frame <= 0:
+        return
+    from warnings import warn
+
+    warn(
+        Warning(
+            "Click detected the use of the unicode_literals __future__"
+            " import. This is heavily discouraged because it can"
+            " introduce subtle bugs in your code. You should instead"
+            ' use explicit u"" literals for your unicode strings. For'
+            " more information see"
+            " https://click.palletsprojects.com/python3/"
+        ),
+        stacklevel=bad_frame,
+    )
+
+
+def _verify_python3_env():
+    """Ensures that the environment is good for unicode on Python 3."""
+    if PY2:
+        return
+    try:
+        import locale
+
+        fs_enc = codecs.lookup(locale.getpreferredencoding()).name
+    except Exception:
+        fs_enc = "ascii"
+    if fs_enc != "ascii":
+        return
+
+    extra = ""
+    if os.name == "posix":
+        import subprocess
+
+        try:
+            rv = subprocess.Popen(
+                ["locale", "-a"], stdout=subprocess.PIPE, stderr=subprocess.PIPE
+            ).communicate()[0]
+        except OSError:
+            rv = b""
+        good_locales = set()
+        has_c_utf8 = False
+
+        # Make sure we're operating on text here.
+        if isinstance(rv, bytes):
+            rv = rv.decode("ascii", "replace")
+
+        for line in rv.splitlines():
+            locale = line.strip()
+            if locale.lower().endswith((".utf-8", ".utf8")):
+                good_locales.add(locale)
+                if locale.lower() in ("c.utf8", "c.utf-8"):
+                    has_c_utf8 = True
+
+        extra += "\n\n"
+        if not good_locales:
+            extra += (
+                "Additional information: on this system no suitable"
+                " UTF-8 locales were discovered. This most likely"
+                " requires resolving by reconfiguring the locale"
+                " system."
+            )
+        elif has_c_utf8:
+            extra += (
+                "This system supports the C.UTF-8 locale which is"
+                " recommended. You might be able to resolve your issue"
+                " by exporting the following environment variables:\n\n"
+                "    export LC_ALL=C.UTF-8\n"
+                "    export LANG=C.UTF-8"
+            )
+        else:
+            extra += (
+                "This system lists a couple of UTF-8 supporting locales"
+                " that you can pick from. The following suitable"
+                " locales were discovered: {}".format(", ".join(sorted(good_locales)))
+            )
+
+        bad_locale = None
+        for locale in os.environ.get("LC_ALL"), os.environ.get("LANG"):
+            if locale and locale.lower().endswith((".utf-8", ".utf8")):
+                bad_locale = locale
+            if locale is not None:
+                break
+        if bad_locale is not None:
+            extra += (
+                "\n\nClick discovered that you exported a UTF-8 locale"
+                " but the locale system could not pick up from it"
+                " because it does not exist. The exported locale is"
+                " '{}' but it is not supported".format(bad_locale)
+            )
+
+    raise RuntimeError(
+        "Click will abort further execution because Python 3 was"
+        " configured to use ASCII as encoding for the environment."
+        " Consult https://click.palletsprojects.com/python3/ for"
+        " mitigation steps.{}".format(extra)
+    )

+ 370 - 0
Lib/site-packages/click/_winconsole.py

@@ -0,0 +1,370 @@
+# -*- coding: utf-8 -*-
+# This module is based on the excellent work by Adam Bartoš who
+# provided a lot of what went into the implementation here in
+# the discussion to issue1602 in the Python bug tracker.
+#
+# There are some general differences in regards to how this works
+# compared to the original patches as we do not need to patch
+# the entire interpreter but just work in our little world of
+# echo and prompt.
+import ctypes
+import io
+import os
+import sys
+import time
+import zlib
+from ctypes import byref
+from ctypes import c_char
+from ctypes import c_char_p
+from ctypes import c_int
+from ctypes import c_ssize_t
+from ctypes import c_ulong
+from ctypes import c_void_p
+from ctypes import POINTER
+from ctypes import py_object
+from ctypes import windll
+from ctypes import WinError
+from ctypes import WINFUNCTYPE
+from ctypes.wintypes import DWORD
+from ctypes.wintypes import HANDLE
+from ctypes.wintypes import LPCWSTR
+from ctypes.wintypes import LPWSTR
+
+import msvcrt
+
+from ._compat import _NonClosingTextIOWrapper
+from ._compat import PY2
+from ._compat import text_type
+
+try:
+    from ctypes import pythonapi
+
+    PyObject_GetBuffer = pythonapi.PyObject_GetBuffer
+    PyBuffer_Release = pythonapi.PyBuffer_Release
+except ImportError:
+    pythonapi = None
+
+
+c_ssize_p = POINTER(c_ssize_t)
+
+kernel32 = windll.kernel32
+GetStdHandle = kernel32.GetStdHandle
+ReadConsoleW = kernel32.ReadConsoleW
+WriteConsoleW = kernel32.WriteConsoleW
+GetConsoleMode = kernel32.GetConsoleMode
+GetLastError = kernel32.GetLastError
+GetCommandLineW = WINFUNCTYPE(LPWSTR)(("GetCommandLineW", windll.kernel32))
+CommandLineToArgvW = WINFUNCTYPE(POINTER(LPWSTR), LPCWSTR, POINTER(c_int))(
+    ("CommandLineToArgvW", windll.shell32)
+)
+LocalFree = WINFUNCTYPE(ctypes.c_void_p, ctypes.c_void_p)(
+    ("LocalFree", windll.kernel32)
+)
+
+
+STDIN_HANDLE = GetStdHandle(-10)
+STDOUT_HANDLE = GetStdHandle(-11)
+STDERR_HANDLE = GetStdHandle(-12)
+
+
+PyBUF_SIMPLE = 0
+PyBUF_WRITABLE = 1
+
+ERROR_SUCCESS = 0
+ERROR_NOT_ENOUGH_MEMORY = 8
+ERROR_OPERATION_ABORTED = 995
+
+STDIN_FILENO = 0
+STDOUT_FILENO = 1
+STDERR_FILENO = 2
+
+EOF = b"\x1a"
+MAX_BYTES_WRITTEN = 32767
+
+
+class Py_buffer(ctypes.Structure):
+    # ctypes mirror of CPython's C-level ``Py_buffer`` struct, used
+    # with PyObject_GetBuffer / PyBuffer_Release below to obtain a raw
+    # pointer to a Python buffer object's memory.
+    _fields_ = [
+        ("buf", c_void_p),
+        ("obj", py_object),
+        ("len", c_ssize_t),
+        ("itemsize", c_ssize_t),
+        ("readonly", c_int),
+        ("ndim", c_int),
+        ("format", c_char_p),
+        ("shape", c_ssize_p),
+        ("strides", c_ssize_p),
+        ("suboffsets", c_ssize_p),
+        ("internal", c_void_p),
+    ]
+
+    # Python 2's Py_buffer layout has an extra ``smalltable`` member
+    # before ``internal``; insert it to keep the struct in sync.
+    if PY2:
+        _fields_.insert(-1, ("smalltable", c_ssize_t * 2))
+
+
+# On PyPy we cannot get buffers so our ability to operate here is
+# severely limited.
+if pythonapi is None:
+    get_buffer = None
+else:
+
+    def get_buffer(obj, writable=False):
+        # Return a ctypes ``c_char`` array aliasing the memory of
+        # ``obj`` (any object supporting the buffer protocol).  The
+        # Py_buffer bookkeeping is released before returning; the raw
+        # address is assumed to stay valid while ``obj`` is alive.
+        buf = Py_buffer()
+        flags = PyBUF_WRITABLE if writable else PyBUF_SIMPLE
+        PyObject_GetBuffer(py_object(obj), byref(buf), flags)
+        try:
+            buffer_type = c_char * buf.len
+            return buffer_type.from_address(buf.buf)
+        finally:
+            PyBuffer_Release(byref(buf))
+
+
+class _WindowsConsoleRawIOBase(io.RawIOBase):
+    # Common base for the raw console reader/writer below; ``handle``
+    # is a Win32 console handle obtained from GetStdHandle.
+    def __init__(self, handle):
+        self.handle = handle
+
+    def isatty(self):
+        # Delegate first so the base class can perform its
+        # closed-stream check; a console stream is otherwise always
+        # considered a tty.
+        io.RawIOBase.isatty(self)
+        return True
+
+
+class _WindowsConsoleReader(_WindowsConsoleRawIOBase):
+    # Raw reader over a console input handle.  The console delivers
+    # UTF-16-LE, so all reads are performed in 2-byte code units.
+    def readable(self):
+        return True
+
+    def readinto(self, b):
+        # Fill the writable buffer ``b`` via ReadConsoleW and return
+        # the number of *bytes* read (2 per code unit).
+        bytes_to_be_read = len(b)
+        if not bytes_to_be_read:
+            return 0
+        elif bytes_to_be_read % 2:
+            raise ValueError(
+                "cannot read odd number of bytes from UTF-16-LE encoded console"
+            )
+
+        buffer = get_buffer(b, writable=True)
+        code_units_to_be_read = bytes_to_be_read // 2
+        code_units_read = c_ulong()
+
+        rv = ReadConsoleW(
+            HANDLE(self.handle),
+            buffer,
+            code_units_to_be_read,
+            byref(code_units_read),
+            None,
+        )
+        if GetLastError() == ERROR_OPERATION_ABORTED:
+            # The read was aborted (e.g. Ctrl-C); sleep briefly so the
+            # pending KeyboardInterrupt can be delivered to Python.
+            time.sleep(0.1)
+        if not rv:
+            raise OSError("Windows error: {}".format(GetLastError()))
+
+        # A leading Ctrl-Z byte (0x1a) marks end-of-file on the console.
+        if buffer[0] == EOF:
+            return 0
+        return 2 * code_units_read.value
+
+
+class _WindowsConsoleWriter(_WindowsConsoleRawIOBase):
+    # Raw writer over a console output handle using WriteConsoleW.
+    def writable(self):
+        return True
+
+    @staticmethod
+    def _get_error_message(errno):
+        # Give the two interesting Win32 error codes readable names.
+        if errno == ERROR_SUCCESS:
+            return "ERROR_SUCCESS"
+        elif errno == ERROR_NOT_ENOUGH_MEMORY:
+            return "ERROR_NOT_ENOUGH_MEMORY"
+        return "Windows error {}".format(errno)
+
+    def write(self, b):
+        # Write at most MAX_BYTES_WRITTEN bytes of ``b``; the ``// 2``
+        # converts the byte budget into UTF-16 code units.  Returning a
+        # short count is allowed by the RawIOBase contract.
+        bytes_to_be_written = len(b)
+        buf = get_buffer(b)
+        code_units_to_be_written = min(bytes_to_be_written, MAX_BYTES_WRITTEN) // 2
+        code_units_written = c_ulong()
+
+        WriteConsoleW(
+            HANDLE(self.handle),
+            buf,
+            code_units_to_be_written,
+            byref(code_units_written),
+            None,
+        )
+        bytes_written = 2 * code_units_written.value
+
+        # Nothing written for a non-empty request means a real failure.
+        if bytes_written == 0 and bytes_to_be_written > 0:
+            raise OSError(self._get_error_message(GetLastError()))
+        return bytes_written
+
+
+class ConsoleStream(object):
+    """Hybrid stream: unicode text is routed to *text_stream* (the
+    UTF-16 console wrapper) while bytes go to *byte_stream*, which is
+    exposed as :attr:`buffer` like the standard text streams.
+    """
+
+    def __init__(self, text_stream, byte_stream):
+        self._text_stream = text_stream
+        self.buffer = byte_stream
+
+    @property
+    def name(self):
+        return self.buffer.name
+
+    def write(self, x):
+        if isinstance(x, text_type):
+            return self._text_stream.write(x)
+        # Bytes path: flush pending text first (best effort) so output
+        # ordering is preserved, then write to the byte buffer.
+        try:
+            self.flush()
+        except Exception:
+            pass
+        return self.buffer.write(x)
+
+    def writelines(self, lines):
+        for line in lines:
+            self.write(line)
+
+    def __getattr__(self, name):
+        # Everything not defined here is delegated to the text stream.
+        return getattr(self._text_stream, name)
+
+    def isatty(self):
+        return self.buffer.isatty()
+
+    def __repr__(self):
+        return "<ConsoleStream name={!r} encoding={!r}>".format(
+            self.name, self.encoding
+        )
+
+
+class WindowsChunkedWriter(object):
+    """
+    Wraps a stream (such as stdout), acting as a transparent proxy for all
+    attribute access apart from method 'write()' which we wrap to write in
+    limited chunks due to a Windows limitation on binary console streams.
+    """
+
+    def __init__(self, wrapped):
+        # double-underscore everything to prevent clashes with names of
+        # attributes on the wrapped stream object.
+        self.__wrapped = wrapped
+
+    def __getattr__(self, name):
+        return getattr(self.__wrapped, name)
+
+    def write(self, text):
+        # Split the payload into MAX_BYTES_WRITTEN-sized chunks and
+        # write them sequentially through the wrapped stream.
+        total_to_write = len(text)
+        written = 0
+
+        while written < total_to_write:
+            to_write = min(total_to_write - written, MAX_BYTES_WRITTEN)
+            self.__wrapped.write(text[written : written + to_write])
+            written += to_write
+
+
+# Names of sys streams already wrapped, so each is wrapped only once.
+_wrapped_std_streams = set()
+
+
+def _wrap_std_stream(name):
+    # Python 2 & Windows 7 and below: wrap ``sys.<name>`` in
+    # WindowsChunkedWriter (once) to work around the console's limit
+    # on large single writes.
+    if (
+        PY2
+        and sys.getwindowsversion()[:2] <= (6, 1)
+        and name not in _wrapped_std_streams
+    ):
+        setattr(sys, name, WindowsChunkedWriter(getattr(sys, name)))
+        _wrapped_std_streams.add(name)
+
+
+def _get_text_stdin(buffer_stream):
+    # Text view over the console stdin handle decoding UTF-16-LE; the
+    # original ``buffer_stream`` stays reachable as ``.buffer``.
+    text_stream = _NonClosingTextIOWrapper(
+        io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)),
+        "utf-16-le",
+        "strict",
+        line_buffering=True,
+    )
+    return ConsoleStream(text_stream, buffer_stream)
+
+
+def _get_text_stdout(buffer_stream):
+    # Text view over the console stdout handle encoding UTF-16-LE; the
+    # original ``buffer_stream`` stays reachable as ``.buffer``.
+    text_stream = _NonClosingTextIOWrapper(
+        io.BufferedWriter(_WindowsConsoleWriter(STDOUT_HANDLE)),
+        "utf-16-le",
+        "strict",
+        line_buffering=True,
+    )
+    return ConsoleStream(text_stream, buffer_stream)
+
+
+def _get_text_stderr(buffer_stream):
+    # Text view over the console stderr handle encoding UTF-16-LE; the
+    # original ``buffer_stream`` stays reachable as ``.buffer``.
+    text_stream = _NonClosingTextIOWrapper(
+        io.BufferedWriter(_WindowsConsoleWriter(STDERR_HANDLE)),
+        "utf-16-le",
+        "strict",
+        line_buffering=True,
+    )
+    return ConsoleStream(text_stream, buffer_stream)
+
+
+if PY2:
+
+    def _hash_py_argv():
+        # Cheap fingerprint of sys.argv[1:] so callers can later detect
+        # whether argv was modified after interpreter start-up.
+        return zlib.crc32("\x00".join(sys.argv[1:]))
+
+    _initial_argv_hash = _hash_py_argv()
+
+    def _get_windows_argv():
+        # Recover the true unicode argv from the Win32 command line
+        # (on Python 2, sys.argv holds ANSI-encoded bytes).
+        argc = c_int(0)
+        argv_unicode = CommandLineToArgvW(GetCommandLineW(), byref(argc))
+        if not argv_unicode:
+            raise WinError()
+        try:
+            argv = [argv_unicode[i] for i in range(0, argc.value)]
+        finally:
+            # The array is LocalAlloc'd by Windows; free it ourselves.
+            LocalFree(argv_unicode)
+            del argv_unicode
+
+        if not hasattr(sys, "frozen"):
+            # Not a frozen executable: drop the interpreter path, then
+            # skip interpreter options up to a bare "-" or a -c/-m flag.
+            argv = argv[1:]
+            while len(argv) > 0:
+                arg = argv[0]
+                if not arg.startswith("-") or arg == "-":
+                    break
+                argv = argv[1:]
+                if arg.startswith(("-c", "-m")):
+                    break
+
+        # Drop the script/module name so the result mirrors sys.argv[1:].
+        return argv[1:]
+
+
+# Map the standard file descriptor numbers to the matching console
+# text-stream factory.
+_stream_factories = {
+    0: _get_text_stdin,
+    1: _get_text_stdout,
+    2: _get_text_stderr,
+}
+
+
+def _is_console(f):
+    # True when ``f`` is backed by a real Windows console: it must have
+    # a usable fileno and GetConsoleMode must succeed on its OS handle.
+    if not hasattr(f, "fileno"):
+        return False
+
+    try:
+        fileno = f.fileno()
+    except OSError:
+        return False
+
+    handle = msvcrt.get_osfhandle(fileno)
+    return bool(GetConsoleMode(handle, byref(DWORD())))
+
+
+def _get_windows_console_stream(f, encoding, errors):
+    # Return a console-aware replacement stream for ``f`` (one of the
+    # standard streams), or None when ``f`` is not a real console, a
+    # non-default encoding/error mode was requested, or buffers are
+    # unavailable (PyPy, see get_buffer above).
+    if (
+        get_buffer is not None
+        and encoding in ("utf-16-le", None)
+        and errors in ("strict", None)
+        and _is_console(f)
+    ):
+        func = _stream_factories.get(f.fileno())
+        if func is not None:
+            if not PY2:
+                # On Python 3 operate on the underlying binary buffer.
+                f = getattr(f, "buffer", None)
+                if f is None:
+                    return None
+            else:
+                # If we are on Python 2 we need to set the stream that we
+                # deal with to binary mode as otherwise the exercise is a
+                # bit moot.  The same problems apply as for
+                # get_binary_stdin and friends from _compat.
+                msvcrt.setmode(f.fileno(), os.O_BINARY)
+            return func(f)

Файловите разлики са ограничени, защото са твърде много
+ 2030 - 0
Lib/site-packages/click/core.py


+ 333 - 0
Lib/site-packages/click/decorators.py

@@ -0,0 +1,333 @@
+import inspect
+import sys
+from functools import update_wrapper
+
+from ._compat import iteritems
+from ._unicodefun import _check_for_unicode_literals
+from .core import Argument
+from .core import Command
+from .core import Group
+from .core import Option
+from .globals import get_current_context
+from .utils import echo
+
+
+def pass_context(f):
+    """Marks a callback as wanting to receive the current context
+    object as first argument.
+    """
+
+    def new_func(*args, **kwargs):
+        return f(get_current_context(), *args, **kwargs)
+
+    # update_wrapper keeps f's name/docstring on the wrapper so the
+    # generated command's help text is unaffected.
+    return update_wrapper(new_func, f)
+
+
+def pass_obj(f):
+    """Similar to :func:`pass_context`, but only pass the object on the
+    context onwards (:attr:`Context.obj`).  This is useful if that object
+    represents the state of a nested system.
+    """
+
+    def new_func(*args, **kwargs):
+        # Forward only ``ctx.obj``, not the context itself.
+        return f(get_current_context().obj, *args, **kwargs)
+
+    return update_wrapper(new_func, f)
+
+
+def make_pass_decorator(object_type, ensure=False):
+    """Given an object type this creates a decorator that will work
+    similar to :func:`pass_obj` but instead of passing the object of the
+    current context, it will find the innermost context of type
+    :func:`object_type`.
+
+    This generates a decorator that works roughly like this::
+
+        from functools import update_wrapper
+
+        def decorator(f):
+            @pass_context
+            def new_func(ctx, *args, **kwargs):
+                obj = ctx.find_object(object_type)
+                return ctx.invoke(f, obj, *args, **kwargs)
+            return update_wrapper(new_func, f)
+        return decorator
+
+    :param object_type: the type of the object to pass.
+    :param ensure: if set to `True`, a new object will be created and
+                   remembered on the context if it's not there yet.
+    """
+
+    def decorator(f):
+        def new_func(*args, **kwargs):
+            ctx = get_current_context()
+            # ensure_object creates-and-stores; find_object only looks up.
+            if ensure:
+                obj = ctx.ensure_object(object_type)
+            else:
+                obj = ctx.find_object(object_type)
+            if obj is None:
+                raise RuntimeError(
+                    "Managed to invoke callback without a context"
+                    " object of type '{}' existing".format(object_type.__name__)
+                )
+            return ctx.invoke(f, obj, *args, **kwargs)
+
+        return update_wrapper(new_func, f)
+
+    return decorator
+
+
+def _make_command(f, name, attrs, cls):
+    # Build a ``cls`` (Command subclass) instance from the decorated
+    # callback ``f``, collecting parameters recorded by @option/@argument.
+    if isinstance(f, Command):
+        raise TypeError("Attempted to convert a callback into a command twice.")
+    try:
+        params = f.__click_params__
+        # Decorators apply bottom-up, so reverse to restore source order.
+        params.reverse()
+        del f.__click_params__
+    except AttributeError:
+        params = []
+    # Help text: explicit attrs['help'] wins, otherwise the docstring.
+    help = attrs.get("help")
+    if help is None:
+        help = inspect.getdoc(f)
+        if isinstance(help, bytes):
+            help = help.decode("utf-8")
+    else:
+        help = inspect.cleandoc(help)
+    attrs["help"] = help
+    _check_for_unicode_literals()
+    return cls(
+        name=name or f.__name__.lower().replace("_", "-"),
+        callback=f,
+        params=params,
+        **attrs
+    )
+
+
+def command(name=None, cls=None, **attrs):
+    r"""Creates a new :class:`Command` and uses the decorated function as
+    callback.  This will also automatically attach all decorated
+    :func:`option`\s and :func:`argument`\s as parameters to the command.
+
+    The name of the command defaults to the name of the function with
+    underscores replaced by dashes.  If you want to change that, you can
+    pass the intended name as the first argument.
+
+    All keyword arguments are forwarded to the underlying command class.
+
+    Once decorated the function turns into a :class:`Command` instance
+    that can be invoked as a command line utility or be attached to a
+    command :class:`Group`.
+
+    :param name: the name of the command.  This defaults to the function
+                 name with underscores replaced by dashes.
+    :param cls: the command class to instantiate.  This defaults to
+                :class:`Command`.
+    """
+    if cls is None:
+        cls = Command
+
+    def decorator(f):
+        cmd = _make_command(f, name, attrs, cls)
+        # Preserve the callback's docstring on the command object.
+        cmd.__doc__ = f.__doc__
+        return cmd
+
+    return decorator
+
+
+def group(name=None, **attrs):
+    """Creates a new :class:`Group` with a function as callback.  This
+    works otherwise the same as :func:`command` just that the `cls`
+    parameter is set to :class:`Group`.
+    """
+    # setdefault keeps an explicit cls=... supplied by the caller.
+    attrs.setdefault("cls", Group)
+    return command(name, **attrs)
+
+
+def _param_memo(f, param):
+    # Attach ``param`` directly when ``f`` is already a Command;
+    # otherwise stash it on the callback's ``__click_params__`` list
+    # for _make_command to collect later.
+    if isinstance(f, Command):
+        f.params.append(param)
+    else:
+        if not hasattr(f, "__click_params__"):
+            f.__click_params__ = []
+        f.__click_params__.append(param)
+
+
+def argument(*param_decls, **attrs):
+    """Attaches an argument to the command.  All positional arguments are
+    passed as parameter declarations to :class:`Argument`; all keyword
+    arguments are forwarded unchanged (except ``cls``).
+    This is equivalent to creating an :class:`Argument` instance manually
+    and attaching it to the :attr:`Command.params` list.
+
+    :param cls: the argument class to instantiate.  This defaults to
+                :class:`Argument`.
+    """
+
+    def decorator(f):
+        # pop() keeps ``cls`` out of the kwargs forwarded to the class.
+        ArgumentClass = attrs.pop("cls", Argument)
+        _param_memo(f, ArgumentClass(param_decls, **attrs))
+        return f
+
+    return decorator
+
+
+def option(*param_decls, **attrs):
+    """Attaches an option to the command.  All positional arguments are
+    passed as parameter declarations to :class:`Option`; all keyword
+    arguments are forwarded unchanged (except ``cls``).
+    This is equivalent to creating an :class:`Option` instance manually
+    and attaching it to the :attr:`Command.params` list.
+
+    :param cls: the option class to instantiate.  This defaults to
+                :class:`Option`.
+    """
+
+    def decorator(f):
+        # Issue 926, copy attrs, so pre-defined options can re-use the same cls=
+        option_attrs = attrs.copy()
+
+        # Normalize indentation of help text like docstrings are.
+        if "help" in option_attrs:
+            option_attrs["help"] = inspect.cleandoc(option_attrs["help"])
+        OptionClass = option_attrs.pop("cls", Option)
+        _param_memo(f, OptionClass(param_decls, **option_attrs))
+        return f
+
+    return decorator
+
+
+def confirmation_option(*param_decls, **attrs):
+    """Shortcut for confirmation prompts that can be ignored by passing
+    ``--yes`` as parameter.
+
+    This is equivalent to decorating a function with :func:`option` with
+    the following parameters::
+
+        def callback(ctx, param, value):
+            if not value:
+                ctx.abort()
+
+        @click.command()
+        @click.option('--yes', is_flag=True, callback=callback,
+                      expose_value=False, prompt='Do you want to continue?')
+        def dropdb():
+            pass
+    """
+
+    def decorator(f):
+        def callback(ctx, param, value):
+            # Abort the whole invocation when the user did not confirm.
+            if not value:
+                ctx.abort()
+
+        # setdefault everywhere so callers can override any of these.
+        attrs.setdefault("is_flag", True)
+        attrs.setdefault("callback", callback)
+        attrs.setdefault("expose_value", False)
+        attrs.setdefault("prompt", "Do you want to continue?")
+        attrs.setdefault("help", "Confirm the action without prompting.")
+        return option(*(param_decls or ("--yes",)), **attrs)(f)
+
+    return decorator
+
+
+def password_option(*param_decls, **attrs):
+    """Shortcut for password prompts.
+
+    This is equivalent to decorating a function with :func:`option` with
+    the following parameters::
+
+        @click.command()
+        @click.option('--password', prompt=True, confirmation_prompt=True,
+                      hide_input=True)
+        def changeadmin(password):
+            pass
+    """
+
+    def decorator(f):
+        # setdefault so callers can override any of these behaviors.
+        attrs.setdefault("prompt", True)
+        attrs.setdefault("confirmation_prompt", True)
+        attrs.setdefault("hide_input", True)
+        return option(*(param_decls or ("--password",)), **attrs)(f)
+
+    return decorator
+
+
+def version_option(version=None, *param_decls, **attrs):
+    """Adds a ``--version`` option which immediately ends the program
+    printing out the version number.  This is implemented as an eager
+    option that prints the version and exits the program in the callback.
+
+    :param version: the version number to show.  If not provided Click
+                    attempts an auto discovery via setuptools.
+    :param prog_name: the name of the program (defaults to autodetection)
+    :param message: custom message to show instead of the default
+                    (``'%(prog)s, version %(version)s'``)
+    :param others: everything else is forwarded to :func:`option`.
+    """
+    if version is None:
+        # Capture the caller's module name now so the callback can later
+        # auto-detect the version from the owning distribution.
+        if hasattr(sys, "_getframe"):
+            module = sys._getframe(1).f_globals.get("__name__")
+        else:
+            module = ""
+
+    def decorator(f):
+        prog_name = attrs.pop("prog_name", None)
+        message = attrs.pop("message", "%(prog)s, version %(version)s")
+
+        def callback(ctx, param, value):
+            if not value or ctx.resilient_parsing:
+                return
+            prog = prog_name
+            if prog is None:
+                prog = ctx.find_root().info_name
+            ver = version
+            if ver is None:
+                # Scan installed console_scripts entry points for one
+                # defined in the capturing module and use that
+                # distribution's version.
+                try:
+                    import pkg_resources
+                except ImportError:
+                    pass
+                else:
+                    for dist in pkg_resources.working_set:
+                        scripts = dist.get_entry_map().get("console_scripts") or {}
+                        for _, entry_point in iteritems(scripts):
+                            if entry_point.module_name == module:
+                                ver = dist.version
+                                break
+                if ver is None:
+                    raise RuntimeError("Could not determine version")
+            echo(message % {"prog": prog, "version": ver}, color=ctx.color)
+            ctx.exit()
+
+        attrs.setdefault("is_flag", True)
+        attrs.setdefault("expose_value", False)
+        attrs.setdefault("is_eager", True)
+        attrs.setdefault("help", "Show the version and exit.")
+        attrs["callback"] = callback
+        return option(*(param_decls or ("--version",)), **attrs)(f)
+
+    return decorator
+
+
+def help_option(*param_decls, **attrs):
+    """Adds a ``--help`` option which immediately ends the program
+    printing out the help page.  This is usually unnecessary to add as
+    this is added by default to all commands unless suppressed.
+
+    Like :func:`version_option`, this is implemented as eager option that
+    prints in the callback and exits.
+
+    All arguments are forwarded to :func:`option`.
+    """
+
+    def decorator(f):
+        def callback(ctx, param, value):
+            # Skip during resilient parsing (e.g. completion machinery).
+            if value and not ctx.resilient_parsing:
+                echo(ctx.get_help(), color=ctx.color)
+                ctx.exit()
+
+        attrs.setdefault("is_flag", True)
+        attrs.setdefault("expose_value", False)
+        attrs.setdefault("help", "Show this message and exit.")
+        attrs.setdefault("is_eager", True)
+        attrs["callback"] = callback
+        return option(*(param_decls or ("--help",)), **attrs)(f)
+
+    return decorator

+ 253 - 0
Lib/site-packages/click/exceptions.py

@@ -0,0 +1,253 @@
+from ._compat import filename_to_ui
+from ._compat import get_text_stderr
+from ._compat import PY2
+from .utils import echo
+
+
+def _join_param_hints(param_hint):
+    # Normalize a parameter hint: sequences are joined as "'a' / 'b'",
+    # plain strings pass through unchanged.
+    if isinstance(param_hint, (tuple, list)):
+        return " / ".join(repr(x) for x in param_hint)
+    return param_hint
+
+
+class ClickException(Exception):
+    """An exception that Click can handle and show to the user."""
+
+    #: The exit code for this exception
+    exit_code = 1
+
+    def __init__(self, message):
+        ctor_msg = message
+        if PY2:
+            # Python 2's Exception wants bytes; keep the original
+            # (possibly unicode) text on ``self.message``.
+            if ctor_msg is not None:
+                ctor_msg = ctor_msg.encode("utf-8")
+        Exception.__init__(self, ctor_msg)
+        self.message = message
+
+    def format_message(self):
+        # Subclasses override this to add context to the message.
+        return self.message
+
+    def __str__(self):
+        return self.message
+
+    if PY2:
+        # On Python 2 ``__str__`` must return bytes; keep the text
+        # version available as ``__unicode__``.
+        __unicode__ = __str__
+
+        def __str__(self):
+            return self.message.encode("utf-8")
+
+    def show(self, file=None):
+        # Print the formatted error to ``file`` (stderr by default).
+        if file is None:
+            file = get_text_stderr()
+        echo("Error: {}".format(self.format_message()), file=file)
+
+
+class UsageError(ClickException):
+    """An internal exception that signals a usage error.  This typically
+    aborts any further handling.
+
+    :param message: the error message to display.
+    :param ctx: optionally the context that caused this error.  Click will
+                fill in the context automatically in some situations.
+    """
+
+    exit_code = 2
+
+    def __init__(self, message, ctx=None):
+        ClickException.__init__(self, message)
+        self.ctx = ctx
+        self.cmd = self.ctx.command if self.ctx else None
+
+    def show(self, file=None):
+        if file is None:
+            file = get_text_stderr()
+        color = None
+        hint = ""
+        # Suggest the help option when the command has one enabled.
+        if self.cmd is not None and self.cmd.get_help_option(self.ctx) is not None:
+            hint = "Try '{} {}' for help.\n".format(
+                self.ctx.command_path, self.ctx.help_option_names[0]
+            )
+        # With a context available, show the usage line (and hint) first.
+        if self.ctx is not None:
+            color = self.ctx.color
+            echo("{}\n{}".format(self.ctx.get_usage(), hint), file=file, color=color)
+        echo("Error: {}".format(self.format_message()), file=file, color=color)
+
+
+class BadParameter(UsageError):
+    """An exception that formats out a standardized error message for a
+    bad parameter.  This is useful when thrown from a callback or type as
+    Click will attach contextual information to it (for instance, which
+    parameter it is).
+
+    .. versionadded:: 2.0
+
+    :param param: the parameter object that caused this error.  This can
+                  be left out, and Click will attach this info itself
+                  if possible.
+    :param param_hint: a string that shows up as parameter name.  This
+                       can be used as alternative to `param` in cases
+                       where custom validation should happen.  If it is
+                       a string it's used as such, if it's a list then
+                       each item is quoted and separated.
+    """
+
+    def __init__(self, message, ctx=None, param=None, param_hint=None):
+        UsageError.__init__(self, message, ctx)
+        self.param = param
+        self.param_hint = param_hint
+
+    def format_message(self):
+        # Prefer the explicit hint, then a hint derived from the param;
+        # with neither, fall back to a generic message.
+        if self.param_hint is not None:
+            param_hint = self.param_hint
+        elif self.param is not None:
+            param_hint = self.param.get_error_hint(self.ctx)
+        else:
+            return "Invalid value: {}".format(self.message)
+        param_hint = _join_param_hints(param_hint)
+
+        return "Invalid value for {}: {}".format(param_hint, self.message)
+
+
+class MissingParameter(BadParameter):
+    """Raised if click required an option or argument but it was not
+    provided when invoking the script.
+
+    .. versionadded:: 4.0
+
+    :param param_type: a string that indicates the type of the parameter.
+                       The default is to inherit the parameter type from
+                       the given `param`.  Valid values are ``'parameter'``,
+                       ``'option'`` or ``'argument'``.
+    """
+
+    def __init__(
+        self, message=None, ctx=None, param=None, param_hint=None, param_type=None
+    ):
+        BadParameter.__init__(self, message, ctx, param, param_hint)
+        self.param_type = param_type
+
+    def format_message(self):
+        # Resolve the hint the same way BadParameter does, but keep a
+        # None fallback instead of returning early.
+        if self.param_hint is not None:
+            param_hint = self.param_hint
+        elif self.param is not None:
+            param_hint = self.param.get_error_hint(self.ctx)
+        else:
+            param_hint = None
+        param_hint = _join_param_hints(param_hint)
+
+        # Inherit the parameter type name from the param when not given.
+        param_type = self.param_type
+        if param_type is None and self.param is not None:
+            param_type = self.param.param_type_name
+
+        # Let the parameter's type contribute extra detail (e.g. choices).
+        msg = self.message
+        if self.param is not None:
+            msg_extra = self.param.type.get_missing_message(self.param)
+            if msg_extra:
+                if msg:
+                    msg += ".  {}".format(msg_extra)
+                else:
+                    msg = msg_extra
+
+        return "Missing {}{}{}{}".format(
+            param_type,
+            " {}".format(param_hint) if param_hint else "",
+            ".  " if msg else ".",
+            msg or "",
+        )
+
+    def __str__(self):
+        if self.message is None:
+            # No message was set; name the missing parameter instead.
+            param_name = self.param.name if self.param else None
+            return "missing parameter: {}".format(param_name)
+        else:
+            return self.message
+
+    if PY2:
+        # Python 2: ``__str__`` must return bytes.
+        __unicode__ = __str__
+
+        def __str__(self):
+            return self.__unicode__().encode("utf-8")
+
+
+class NoSuchOption(UsageError):
+    """Raised if click attempted to handle an option that does not
+    exist.
+
+    .. versionadded:: 4.0
+    """
+
+    def __init__(self, option_name, message=None, possibilities=None, ctx=None):
+        if message is None:
+            message = "no such option: {}".format(option_name)
+        UsageError.__init__(self, message, ctx)
+        self.option_name = option_name
+        # Candidate option names offered as "did you mean" suggestions.
+        self.possibilities = possibilities
+
+    def format_message(self):
+        bits = [self.message]
+        if self.possibilities:
+            # Single candidate reads as a suggestion, several as a list.
+            if len(self.possibilities) == 1:
+                bits.append("Did you mean {}?".format(self.possibilities[0]))
+            else:
+                possibilities = sorted(self.possibilities)
+                bits.append("(Possible options: {})".format(", ".join(possibilities)))
+        return "  ".join(bits)
+
+
+class BadOptionUsage(UsageError):
+    """Raised if an option is generally supplied but the use of the option
+    was incorrect.  This is for instance raised if the number of arguments
+    for an option is not correct.
+
+    .. versionadded:: 4.0
+
+    :param option_name: the name of the option being used incorrectly.
+    """
+
+    def __init__(self, option_name, message, ctx=None):
+        UsageError.__init__(self, message, ctx)
+        # Kept for callers that want to know which option misfired.
+        self.option_name = option_name
+
+
+class BadArgumentUsage(UsageError):
+    """Raised if an argument is generally supplied but the use of the argument
+    was incorrect.  This is for instance raised if the number of values
+    for an argument is not correct.
+
+    .. versionadded:: 6.0
+    """
+
+    def __init__(self, message, ctx=None):
+        # No extra state; exists to give argument misuse its own type.
+        UsageError.__init__(self, message, ctx)
+
+
+class FileError(ClickException):
+    """Raised if a file cannot be opened."""
+
+    def __init__(self, filename, hint=None):
+        # Keep both the raw filename and a display-safe version.
+        ui_filename = filename_to_ui(filename)
+        if hint is None:
+            hint = "unknown error"
+        ClickException.__init__(self, hint)
+        self.ui_filename = ui_filename
+        self.filename = filename
+
+    def format_message(self):
+        return "Could not open file {}: {}".format(self.ui_filename, self.message)
+
+
+# Control-flow marker only; it carries no user-facing message of its own.
+class Abort(RuntimeError):
+    """An internal signalling exception that signals Click to abort."""
+
+
+class Exit(RuntimeError):
+    """An exception that indicates that the application should exit with some
+    status code.
+
+    :param code: the status code to exit with.
+    """
+
+    # Keep this exception lightweight: only the exit code is stored.
+    __slots__ = ("exit_code",)
+
+    def __init__(self, code=0):
+        self.exit_code = code

+ 283 - 0
Lib/site-packages/click/formatting.py

@@ -0,0 +1,283 @@
+from contextlib import contextmanager
+
+from ._compat import term_len
+from .parser import split_opt
+from .termui import get_terminal_size
+
+# Can force a width.  This is used by the test system
+FORCED_WIDTH = None
+
+
def measure_table(rows):
    """Return a tuple with the widest cell width of each column in *rows*."""
    column_widths = {}
    for row in rows:
        for column, cell in enumerate(row):
            width = term_len(cell)
            # -1 sentinel ensures every seen column gets an entry, even
            # when all of its cells are empty.
            if width > column_widths.get(column, -1):
                column_widths[column] = width
    return tuple(width for _, width in sorted(column_widths.items()))
+
+
def iter_rows(rows, col_count):
    """Yield each row padded with empty strings up to *col_count* cells."""
    for row in rows:
        cells = tuple(row)
        missing = col_count - len(cells)
        yield cells + ("",) * missing
+
+
def wrap_text(
    text, width=78, initial_indent="", subsequent_indent="", preserve_paragraphs=False
):
    """A helper function that intelligently wraps text.  By default, it
    assumes that it operates on a single paragraph of text but if the
    `preserve_paragraphs` parameter is provided it will intelligently
    handle paragraphs (defined by two empty lines).

    If paragraphs are handled, a paragraph can be prefixed with an empty
    line containing the ``\\b`` character (``\\x08``) to indicate that
    no rewrapping should happen in that block.

    :param text: the text that should be rewrapped.
    :param width: the maximum width for the text.
    :param initial_indent: the initial indent that should be placed on the
                           first line as a string.
    :param subsequent_indent: the indent string that should be placed on
                              each consecutive line.
    :param preserve_paragraphs: if this flag is set then the wrapping will
                                intelligently handle paragraphs.
    """
    from ._textwrap import TextWrapper

    text = text.expandtabs()
    wrapper = TextWrapper(
        width,
        initial_indent=initial_indent,
        subsequent_indent=subsequent_indent,
        replace_whitespace=False,
    )
    if not preserve_paragraphs:
        return wrapper.fill(text)

    # Each entry of ``p`` is (indent, raw, text): the paragraph's detected
    # leading indentation, whether rewrapping is suppressed (\b marker),
    # and the paragraph's joined text.
    p = []
    buf = []
    indent = None

    def _flush_par():
        # Close the paragraph currently collected in ``buf`` into ``p``.
        if not buf:
            return
        if buf[0].strip() == "\b":
            # A lone \b on the first line marks a verbatim block; the
            # marker line itself is dropped.
            p.append((indent or 0, True, "\n".join(buf[1:])))
        else:
            p.append((indent or 0, False, " ".join(buf)))
        del buf[:]

    for line in text.splitlines():
        if not line:
            # Blank line ends the current paragraph.
            _flush_par()
            indent = None
        else:
            if indent is None:
                # The first line of a paragraph determines its indent.
                orig_len = term_len(line)
                line = line.lstrip()
                indent = orig_len - term_len(line)
            buf.append(line)
    _flush_par()

    rv = []
    for indent, raw, text in p:
        with wrapper.extra_indent(" " * indent):
            if raw:
                # Verbatim block: only apply the indentation, keep lines.
                rv.append(wrapper.indent_only(text))
            else:
                rv.append(wrapper.fill(text))

    return "\n\n".join(rv)
+
+
class HelpFormatter(object):
    """This class helps with formatting text-based help pages.  It's
    usually just needed for very special internal cases, but it's also
    exposed so that developers can write their own fancy outputs.

    At present, it always writes into memory.

    :param indent_increment: the additional increment for each level.
    :param width: the width for the text.  This defaults to the terminal
                  width clamped to a maximum of 78.
    """

    def __init__(self, indent_increment=2, width=None, max_width=None):
        self.indent_increment = indent_increment
        if max_width is None:
            max_width = 80
        if width is None:
            # FORCED_WIDTH (module global) wins; otherwise derive from the
            # terminal, leaving a 2-column margin, but never below 50.
            width = FORCED_WIDTH
            if width is None:
                width = max(min(get_terminal_size()[0], max_width) - 2, 50)
        self.width = width
        self.current_indent = 0
        self.buffer = []

    def write(self, string):
        """Writes a unicode string into the internal buffer."""
        self.buffer.append(string)

    def indent(self):
        """Increases the indentation."""
        self.current_indent += self.indent_increment

    def dedent(self):
        """Decreases the indentation."""
        self.current_indent -= self.indent_increment

    def write_usage(self, prog, args="", prefix="Usage: "):
        """Writes a usage line into the buffer.

        :param prog: the program name.
        :param args: whitespace separated list of arguments.
        :param prefix: the prefix for the first line.
        """
        usage_prefix = "{:>{w}}{} ".format(prefix, prog, w=self.current_indent)
        text_width = self.width - self.current_indent

        if text_width >= (term_len(usage_prefix) + 20):
            # The arguments will fit to the right of the prefix.
            indent = " " * term_len(usage_prefix)
            self.write(
                wrap_text(
                    args,
                    text_width,
                    initial_indent=usage_prefix,
                    subsequent_indent=indent,
                )
            )
        else:
            # The prefix is too long, put the arguments on the next line.
            self.write(usage_prefix)
            self.write("\n")
            indent = " " * (max(self.current_indent, term_len(prefix)) + 4)
            self.write(
                wrap_text(
                    args, text_width, initial_indent=indent, subsequent_indent=indent
                )
            )

        self.write("\n")

    def write_heading(self, heading):
        """Writes a heading into the buffer."""
        self.write("{:>{w}}{}:\n".format("", heading, w=self.current_indent))

    def write_paragraph(self):
        """Writes a paragraph into the buffer."""
        # Only emit a separating blank line once something was written.
        if self.buffer:
            self.write("\n")

    def write_text(self, text):
        """Writes re-indented text into the buffer.  This rewraps and
        preserves paragraphs.
        """
        # Never wrap narrower than 11 columns, even when deeply indented.
        text_width = max(self.width - self.current_indent, 11)
        indent = " " * self.current_indent
        self.write(
            wrap_text(
                text,
                text_width,
                initial_indent=indent,
                subsequent_indent=indent,
                preserve_paragraphs=True,
            )
        )
        self.write("\n")

    def write_dl(self, rows, col_max=30, col_spacing=2):
        """Writes a definition list into the buffer.  This is how options
        and commands are usually formatted.

        :param rows: a list of two item tuples for the terms and values.
        :param col_max: the maximum width of the first column.
        :param col_spacing: the number of spaces between the first and
                            second column.
        """
        rows = list(rows)
        widths = measure_table(rows)
        if len(widths) != 2:
            raise TypeError("Expected two columns for definition list")

        first_col = min(widths[0], col_max) + col_spacing

        for first, second in iter_rows(rows, len(widths)):
            self.write("{:>{w}}{}".format("", first, w=self.current_indent))
            if not second:
                self.write("\n")
                continue
            if term_len(first) <= first_col - col_spacing:
                # Term fits: pad to the second column on the same line.
                self.write(" " * (first_col - term_len(first)))
            else:
                # Term too long: start the description on the next line.
                self.write("\n")
                self.write(" " * (first_col + self.current_indent))

            text_width = max(self.width - first_col - 2, 10)
            wrapped_text = wrap_text(second, text_width, preserve_paragraphs=True)
            lines = wrapped_text.splitlines()

            if lines:
                self.write("{}\n".format(lines[0]))

                for line in lines[1:]:
                    self.write(
                        "{:>{w}}{}\n".format(
                            "", line, w=first_col + self.current_indent
                        )
                    )

                if len(lines) > 1:
                    # separate long help from next option
                    self.write("\n")
            else:
                self.write("\n")

    @contextmanager
    def section(self, name):
        """Helpful context manager that writes a paragraph, a heading,
        and the indents.

        :param name: the section name that is written as heading.
        """
        self.write_paragraph()
        self.write_heading(name)
        self.indent()
        try:
            yield
        finally:
            self.dedent()

    @contextmanager
    def indentation(self):
        """A context manager that increases the indentation."""
        self.indent()
        try:
            yield
        finally:
            self.dedent()

    def getvalue(self):
        """Returns the buffer contents."""
        return "".join(self.buffer)
+
+
def join_options(options):
    """Join a list of option strings in the most appropriate way.

    Returns ``(formatted_string, any_prefix_is_slash)`` where the second
    item is a flag indicating whether any option prefix was a slash.
    """
    decorated = []
    any_prefix_is_slash = False

    for opt in options:
        prefix = split_opt(opt)[0]
        if prefix == "/":
            any_prefix_is_slash = True
        # Sort key is the prefix length, so short options come first.
        decorated.append((len(prefix), opt))

    decorated.sort(key=lambda item: item[0])
    formatted = ", ".join(opt for _, opt in decorated)
    return formatted, any_prefix_is_slash

+ 47 - 0
Lib/site-packages/click/globals.py

@@ -0,0 +1,47 @@
+from threading import local
+
+_local = local()
+
+
def get_current_context(silent=False):
    """Returns the current click context.  This can be used as a way to
    access the current context object from anywhere.  This is a more implicit
    alternative to the :func:`pass_context` decorator.  This function is
    primarily useful for helpers such as :func:`echo` which might be
    interested in changing its behavior based on the current context.

    To push the current context, :meth:`Context.scope` can be used.

    .. versionadded:: 5.0

    :param silent: if set to `True` the return value is `None` if no context
                   is available.  The default behavior is to raise a
                   :exc:`RuntimeError`.
    """
    try:
        # AttributeError: this thread never pushed a context;
        # IndexError: the stack exists but is empty.
        stack = _local.stack
        return stack[-1]
    except (AttributeError, IndexError):
        if silent:
            return None
        raise RuntimeError("There is no active click context.")
+
+
def push_context(ctx):
    """Push *ctx* onto the thread-local stack, creating it on first use."""
    stack = _local.__dict__.setdefault("stack", [])
    stack.append(ctx)
+
+
def pop_context():
    """Discard the innermost (top of stack) context."""
    stack = _local.stack
    stack.pop()
+
+
def resolve_color_default(color=None):
    """Internal helper to get the default value of the color flag.  If a
    value is passed it's returned unchanged, otherwise it's looked up from
    the current context.
    """
    if color is not None:
        return color
    ctx = get_current_context(silent=True)
    if ctx is not None:
        return ctx.color
    # Implicitly returns None when there is neither an explicit value nor
    # an active context to consult.

+ 428 - 0
Lib/site-packages/click/parser.py

@@ -0,0 +1,428 @@
+# -*- coding: utf-8 -*-
+"""
+This module started out as largely a copy paste from the stdlib's
+optparse module with the features removed that we do not need from
+optparse because we implement them in Click on a higher level (for
+instance type handling, help formatting and a lot more).
+
+The plan is to remove more and more from here over time.
+
+The reason this is a different module and not optparse from the stdlib
+is that there are differences in 2.x and 3.x about the error messages
+generated and optparse in the stdlib uses gettext for no good reason
+and might cause us issues.
+
+Click uses parts of optparse written by Gregory P. Ward and maintained
+by the Python Software Foundation. This is limited to code in parser.py.
+
+Copyright 2001-2006 Gregory P. Ward. All rights reserved.
+Copyright 2002-2006 Python Software Foundation. All rights reserved.
+"""
+import re
+from collections import deque
+
+from .exceptions import BadArgumentUsage
+from .exceptions import BadOptionUsage
+from .exceptions import NoSuchOption
+from .exceptions import UsageError
+
+
+def _unpack_args(args, nargs_spec):
+    """Given an iterable of arguments and an iterable of nargs specifications,
+    it returns a tuple with all the unpacked arguments at the first index
+    and all remaining arguments as the second.
+
+    The nargs specification is the number of arguments that should be consumed
+    or `-1` to indicate that this position should eat up all the remainders.
+
+    Missing items are filled with `None`.
+    """
+    args = deque(args)
+    nargs_spec = deque(nargs_spec)
+    rv = []
+    spos = None
+
+    def _fetch(c):
+        try:
+            if spos is None:
+                return c.popleft()
+            else:
+                return c.pop()
+        except IndexError:
+            return None
+
+    while nargs_spec:
+        nargs = _fetch(nargs_spec)
+        if nargs == 1:
+            rv.append(_fetch(args))
+        elif nargs > 1:
+            x = [_fetch(args) for _ in range(nargs)]
+            # If we're reversed, we're pulling in the arguments in reverse,
+            # so we need to turn them around.
+            if spos is not None:
+                x.reverse()
+            rv.append(tuple(x))
+        elif nargs < 0:
+            if spos is not None:
+                raise TypeError("Cannot have two nargs < 0")
+            spos = len(rv)
+            rv.append(None)
+
+    # spos is the position of the wildcard (star).  If it's not `None`,
+    # we fill it with the remainder.
+    if spos is not None:
+        rv[spos] = tuple(args)
+        args = []
+        rv[spos + 1 :] = reversed(rv[spos + 1 :])
+
+    return tuple(rv), list(args)
+
+
def _error_opt_args(nargs, opt):
    """Raise :exc:`BadOptionUsage` describing how many values *opt* needs."""
    if nargs == 1:
        message = "{} option requires an argument".format(opt)
    else:
        message = "{} option requires {} arguments".format(opt, nargs)
    raise BadOptionUsage(opt, message)
+
+
def split_opt(opt):
    """Split an option string into ``(prefix, name)``.

    A token starting with an alphanumeric character has no prefix; a
    doubled prefix character (``--foo``) yields a two-character prefix.
    """
    lead = opt[:1]
    if lead.isalnum():
        return "", opt
    prefix_len = 2 if opt[1:2] == lead else 1
    return opt[:prefix_len], opt[prefix_len:]
+
+
def normalize_opt(opt, ctx):
    """Apply the context's token normalization function to an option name.

    The prefix is preserved untouched; without a context (or without a
    normalization function) the option is returned unchanged.
    """
    if ctx is None or ctx.token_normalize_func is None:
        return opt
    prefix, name = split_opt(opt)
    return prefix + ctx.token_normalize_func(name)
+
+
def split_arg_string(string):
    """Split an argument string into individual shell-like tokens.

    Single- and double-quoted groups are kept together and unquoted,
    with backslash escapes decoded; everything else splits on whitespace.
    """
    pattern = r"('([^'\\]*(?:\\.[^'\\]*)*)'|\"([^\"\\]*(?:\\.[^\"\\]*)*)\"|\S+)\s*"
    tokens = []
    for match in re.finditer(pattern, string, re.S):
        token = match.group().strip()
        quote = token[:1]
        if quote in "\"'" and quote == token[-1:]:
            # Strip the quotes and interpret backslash escapes.
            token = token[1:-1].encode("ascii", "backslashreplace")
            token = token.decode("unicode-escape")
        try:
            # Mirror the input's string type (relevant on Python 2).
            token = type(string)(token)
        except UnicodeError:
            pass
        tokens.append(token)
    return tokens
+
+
class Option(object):
    """Internal parser-level representation of a command line option.

    Classifies the given opt strings into short and long forms, records
    the prefix characters they use, and knows how to apply its action to
    a :class:`ParsingState`.
    """

    def __init__(self, opts, dest, action=None, nargs=1, const=None, obj=None):
        self._short_opts = []
        self._long_opts = []
        self.prefixes = set()

        for opt in opts:
            prefix, value = split_opt(opt)
            if not prefix:
                raise ValueError("Invalid start character for option ({})".format(opt))
            self.prefixes.add(prefix[0])
            is_short = len(prefix) == 1 and len(value) == 1
            if is_short:
                self._short_opts.append(opt)
            else:
                self._long_opts.append(opt)
                # Long options additionally register their full prefix.
                self.prefixes.add(prefix)

        self.dest = dest
        self.action = "store" if action is None else action
        self.nargs = nargs
        self.const = const
        self.obj = obj

    @property
    def takes_value(self):
        # Only these two actions consume a value from the command line.
        return self.action in ("store", "append")

    def process(self, value, state):
        """Record *value* in *state* according to this option's action."""
        action = self.action
        opts = state.opts
        if action == "store":
            opts[self.dest] = value
        elif action == "store_const":
            opts[self.dest] = self.const
        elif action == "append":
            opts.setdefault(self.dest, []).append(value)
        elif action == "append_const":
            opts.setdefault(self.dest, []).append(self.const)
        elif action == "count":
            opts[self.dest] = opts.get(self.dest, 0) + 1
        else:
            raise ValueError("unknown action '{}'".format(action))
        state.order.append(self.obj)
+
+
class Argument(object):
    """Internal parser-level representation of a positional argument."""

    def __init__(self, dest, nargs=1, obj=None):
        self.dest = dest
        self.nargs = nargs
        self.obj = obj

    def process(self, value, state):
        """Record *value* in *state*, validating multi-value arguments."""
        if self.nargs > 1:
            missing = sum(1 for item in value if item is None)
            if missing == len(value):
                # Every slot is empty: treat the argument as not given.
                value = None
            elif missing:
                # Some but not all values present: that's a usage error.
                raise BadArgumentUsage(
                    "argument {} takes {} values".format(self.dest, self.nargs)
                )
        state.opts[self.dest] = value
        state.order.append(self.obj)
+
+
class ParsingState(object):
    """Mutable accumulator shared by the option and argument passes."""

    def __init__(self, rargs):
        self.opts = {}  # dest -> parsed value
        self.largs = []  # leftover (positional) arguments collected so far
        self.rargs = rargs  # remaining arguments still to be processed
        self.order = []  # option/argument objects in processing order
+
+
class OptionParser(object):
    """The option parser is an internal class that is ultimately used to
    parse options and arguments.  It's modelled after optparse and brings
    a similar but vastly simplified API.  It should generally not be used
    directly as the high level Click classes wrap it for you.

    It's not nearly as extensible as optparse or argparse as it does not
    implement features that are implemented on a higher level (such as
    types or defaults).

    :param ctx: optionally the :class:`~click.Context` where this parser
                should go with.
    """

    def __init__(self, ctx=None):
        #: The :class:`~click.Context` for this parser.  This might be
        #: `None` for some advanced use cases.
        self.ctx = ctx
        #: This controls how the parser deals with interspersed arguments.
        #: If this is set to `False`, the parser will stop on the first
        #: non-option.  Click uses this to implement nested subcommands
        #: safely.
        self.allow_interspersed_args = True
        #: This tells the parser how to deal with unknown options.  By
        #: default it will error out (which is sensible), but there is a
        #: second mode where it will ignore it and continue processing
        #: after shifting all the unknown options into the resulting args.
        self.ignore_unknown_options = False
        if ctx is not None:
            self.allow_interspersed_args = ctx.allow_interspersed_args
            self.ignore_unknown_options = ctx.ignore_unknown_options
        self._short_opt = {}
        self._long_opt = {}
        self._opt_prefixes = {"-", "--"}
        self._args = []

    def add_option(self, opts, dest, action=None, nargs=1, const=None, obj=None):
        """Adds a new option named `dest` to the parser.  The destination
        is not inferred (unlike with optparse) and needs to be explicitly
        provided.  Action can be any of ``store``, ``store_const``,
        ``append``, ``append_const`` or ``count``.

        The `obj` can be used to identify the option in the order list
        that is returned from the parser.
        """
        if obj is None:
            obj = dest
        opts = [normalize_opt(opt, self.ctx) for opt in opts]
        option = Option(opts, dest, action=action, nargs=nargs, const=const, obj=obj)
        self._opt_prefixes.update(option.prefixes)
        for opt in option._short_opts:
            self._short_opt[opt] = option
        for opt in option._long_opts:
            self._long_opt[opt] = option

    def add_argument(self, dest, nargs=1, obj=None):
        """Adds a positional argument named `dest` to the parser.

        The `obj` can be used to identify the option in the order list
        that is returned from the parser.
        """
        if obj is None:
            obj = dest
        self._args.append(Argument(dest=dest, nargs=nargs, obj=obj))

    def parse_args(self, args):
        """Parses positional arguments and returns ``(values, args, order)``
        for the parsed options and arguments as well as the leftover
        arguments if there are any.  The order is a list of objects as they
        appear on the command line.  If arguments appear multiple times they
        will be memorized multiple times as well.
        """
        state = ParsingState(args)
        try:
            self._process_args_for_options(state)
            self._process_args_for_args(state)
        except UsageError:
            # In resilient parsing mode (e.g. completion) errors are
            # swallowed and whatever was parsed so far is returned.
            if self.ctx is None or not self.ctx.resilient_parsing:
                raise
        return state.opts, state.largs, state.order

    def _process_args_for_args(self, state):
        # Second pass: distribute whatever is left over the declared
        # positional arguments.
        pargs, args = _unpack_args(
            state.largs + state.rargs, [x.nargs for x in self._args]
        )

        for idx, arg in enumerate(self._args):
            arg.process(pargs[idx], state)

        state.largs = args
        state.rargs = []

    def _process_args_for_options(self, state):
        # First pass: consume options (and their values), moving anything
        # else into largs for the positional pass.
        while state.rargs:
            arg = state.rargs.pop(0)
            arglen = len(arg)
            # Double dashes always handled explicitly regardless of what
            # prefixes are valid.
            if arg == "--":
                return
            elif arg[:1] in self._opt_prefixes and arglen > 1:
                self._process_opts(arg, state)
            elif self.allow_interspersed_args:
                state.largs.append(arg)
            else:
                state.rargs.insert(0, arg)
                return

        # Say this is the original argument list:
        # [arg0, arg1, ..., arg(i-1), arg(i), arg(i+1), ..., arg(N-1)]
        #                            ^
        # (we are about to process arg(i)).
        #
        # Then rargs is [arg(i), ..., arg(N-1)] and largs is a *subset* of
        # [arg0, ..., arg(i-1)] (any options and their arguments will have
        # been removed from largs).
        #
        # The while loop will usually consume 1 or more arguments per pass.
        # If it consumes 1 (eg. arg is an option that takes no arguments),
        # then after _process_arg() is done the situation is:
        #
        #   largs = subset of [arg0, ..., arg(i)]
        #   rargs = [arg(i+1), ..., arg(N-1)]
        #
        # If allow_interspersed_args is false, largs will always be
        # *empty* -- still a subset of [arg0, ..., arg(i-1)], but
        # not a very interesting subset!

    def _match_long_opt(self, opt, explicit_value, state):
        if opt not in self._long_opt:
            possibilities = [word for word in self._long_opt if word.startswith(opt)]
            raise NoSuchOption(opt, possibilities=possibilities, ctx=self.ctx)

        option = self._long_opt[opt]
        if option.takes_value:
            # At this point it's safe to modify rargs by injecting the
            # explicit value, because no exception is raised in this
            # branch.  This means that the inserted value will be fully
            # consumed.
            if explicit_value is not None:
                state.rargs.insert(0, explicit_value)

            nargs = option.nargs
            if len(state.rargs) < nargs:
                _error_opt_args(nargs, opt)
            elif nargs == 1:
                value = state.rargs.pop(0)
            else:
                value = tuple(state.rargs[:nargs])
                del state.rargs[:nargs]

        elif explicit_value is not None:
            raise BadOptionUsage(opt, "{} option does not take a value".format(opt))

        else:
            value = None

        option.process(value, state)

    def _match_short_opt(self, arg, state):
        stop = False
        i = 1
        prefix = arg[0]
        unknown_options = []

        # Short options can be clustered ("-abc"); walk the characters.
        for ch in arg[1:]:
            opt = normalize_opt(prefix + ch, self.ctx)
            option = self._short_opt.get(opt)
            i += 1

            if not option:
                if self.ignore_unknown_options:
                    unknown_options.append(ch)
                    continue
                raise NoSuchOption(opt, ctx=self.ctx)
            if option.takes_value:
                # Any characters left in arg?  Pretend they're the
                # next arg, and stop consuming characters of arg.
                if i < len(arg):
                    state.rargs.insert(0, arg[i:])
                    stop = True

                nargs = option.nargs
                if len(state.rargs) < nargs:
                    _error_opt_args(nargs, opt)
                elif nargs == 1:
                    value = state.rargs.pop(0)
                else:
                    value = tuple(state.rargs[:nargs])
                    del state.rargs[:nargs]

            else:
                value = None

            option.process(value, state)

            if stop:
                break

        # If we got any unknown options we re-combinate the string of the
        # remaining options and re-attach the prefix, then report that
        # to the state as new larg.  This way there is basic combinatorics
        # that can be achieved while still ignoring unknown arguments.
        if self.ignore_unknown_options and unknown_options:
            state.largs.append("{}{}".format(prefix, "".join(unknown_options)))

    def _process_opts(self, arg, state):
        explicit_value = None
        # Long option handling happens in two parts.  The first part is
        # supporting explicitly attached values.  In any case, we will try
        # to long match the option first.
        if "=" in arg:
            long_opt, explicit_value = arg.split("=", 1)
        else:
            long_opt = arg
        norm_long_opt = normalize_opt(long_opt, self.ctx)

        # At this point we will match the (assumed) long option through
        # the long option matching code.  Note that this allows options
        # like "-foo" to be matched as long options.
        try:
            self._match_long_opt(norm_long_opt, explicit_value, state)
        except NoSuchOption:
            # At this point the long option matching failed, and we need
            # to try with short options.  However there is a special rule
            # which says, that if we have a two character options prefix
            # (applies to "--foo" for instance), we do not dispatch to the
            # short option code and will instead raise the no option
            # error.
            if arg[:2] not in self._opt_prefixes:
                return self._match_short_opt(arg, state)
            if not self.ignore_unknown_options:
                raise
            state.largs.append(arg)

+ 681 - 0
Lib/site-packages/click/termui.py

@@ -0,0 +1,681 @@
+import inspect
+import io
+import itertools
+import os
+import struct
+import sys
+
+from ._compat import DEFAULT_COLUMNS
+from ._compat import get_winterm_size
+from ._compat import isatty
+from ._compat import raw_input
+from ._compat import string_types
+from ._compat import strip_ansi
+from ._compat import text_type
+from ._compat import WIN
+from .exceptions import Abort
+from .exceptions import UsageError
+from .globals import resolve_color_default
+from .types import Choice
+from .types import convert_type
+from .types import Path
+from .utils import echo
+from .utils import LazyFile
+
# The prompt functions to use.  The doc tools currently override these
# functions to customize how they work.
visible_prompt_func = raw_input

# Foreground ANSI SGR codes by color name; style() derives the matching
# background codes by adding 10 to these values.
_ansi_colors = {
    "black": 30,
    "red": 31,
    "green": 32,
    "yellow": 33,
    "blue": 34,
    "magenta": 35,
    "cyan": 36,
    "white": 37,
    "reset": 39,
    "bright_black": 90,
    "bright_red": 91,
    "bright_green": 92,
    "bright_yellow": 93,
    "bright_blue": 94,
    "bright_magenta": 95,
    "bright_cyan": 96,
    "bright_white": 97,
}
# SGR sequence that resets all styling attributes.
_ansi_reset_all = "\033[0m"
+
+
def hidden_prompt_func(prompt):
    """Read a line of input without echoing it (used for passwords)."""
    from getpass import getpass

    return getpass(prompt)
+
+
+def _build_prompt(
+    text, suffix, show_default=False, default=None, show_choices=True, type=None
+):
+    prompt = text
+    if type is not None and show_choices and isinstance(type, Choice):
+        prompt += " ({})".format(", ".join(map(str, type.choices)))
+    if default is not None and show_default:
+        prompt = "{} [{}]".format(prompt, _format_default(default))
+    return prompt + suffix
+
+
def _format_default(default):
    """Return a display-friendly form of a prompt default value.

    File-like defaults (real file objects or :class:`LazyFile`) are shown
    by their ``name`` attribute; anything else is returned unchanged.
    """
    file_like = isinstance(default, (io.IOBase, LazyFile))
    if file_like and hasattr(default, "name"):
        return default.name
    return default
+
+
def prompt(
    text,
    default=None,
    hide_input=False,
    confirmation_prompt=False,
    type=None,
    value_proc=None,
    prompt_suffix=": ",
    show_default=True,
    err=False,
    show_choices=True,
):
    """Prompts a user for input.  This is a convenience function that can
    be used to prompt a user for input later.

    If the user aborts the input by sending a interrupt signal, this
    function will catch it and raise a :exc:`Abort` exception.

    .. versionadded:: 7.0
       Added the show_choices parameter.

    .. versionadded:: 6.0
       Added unicode support for cmd.exe on Windows.

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param text: the text to show for the prompt.
    :param default: the default value to use if no input happens.  If this
                    is not given it will prompt until it's aborted.
    :param hide_input: if this is set to true then the input value will
                       be hidden.
    :param confirmation_prompt: asks for confirmation for the value.
    :param type: the type to use to check the value against.
    :param value_proc: if this parameter is provided it's a function that
                       is invoked instead of the type conversion to
                       convert a value.
    :param prompt_suffix: a suffix that should be added to the prompt.
    :param show_default: shows or hides the default value in the prompt.
    :param err: if set to true the file defaults to ``stderr`` instead of
                ``stdout``, the same as with echo.
    :param show_choices: Show or hide choices if the passed type is a Choice.
                         For example if type is a Choice of either day or week,
                         show_choices is true and text is "Group by" then the
                         prompt will be "Group by (day, week): ".
    """
    result = None

    def prompt_func(text):
        # Pick the echoing or hidden reader depending on hide_input.
        f = hidden_prompt_func if hide_input else visible_prompt_func
        try:
            # Write the prompt separately so that we get nice
            # coloring through colorama on Windows
            echo(text, nl=False, err=err)
            return f("")
        except (KeyboardInterrupt, EOFError):
            # getpass doesn't print a newline if the user aborts input with ^C.
            # Allegedly this behavior is inherited from getpass(3).
            # A doc bug has been filed at https://bugs.python.org/issue24711
            if hide_input:
                echo(None, err=err)
            raise Abort()

    if value_proc is None:
        # No explicit converter given: derive one from the declared type
        # (or infer it from the default value).
        value_proc = convert_type(type, default)

    prompt = _build_prompt(
        text, prompt_suffix, show_default, default, show_choices, type
    )

    while 1:
        # Inner loop: keep asking until we get non-empty input or can
        # fall back to the default.
        while 1:
            value = prompt_func(prompt)
            if value:
                break
            elif default is not None:
                if isinstance(value_proc, Path):
                    # validate Path default value(exists, dir_okay etc.)
                    value = default
                    break
                return default
        try:
            result = value_proc(value)
        except UsageError as e:
            # Conversion failed: report the error and re-prompt.
            echo("Error: {}".format(e.message), err=err)  # noqa: B306
            continue
        if not confirmation_prompt:
            return result
        # Confirmation round: require a non-empty repeat that matches.
        while 1:
            value2 = prompt_func("Repeat for confirmation: ")
            if value2:
                break
        if value == value2:
            return result
        echo("Error: the two entered values do not match", err=err)
+
+
def confirm(
    text, default=False, abort=False, prompt_suffix=": ", show_default=True, err=False
):
    """Ask a yes/no question and return the answer as a boolean.

    An interrupt (``^C`` or EOF) while waiting for input raises
    :exc:`Abort`.

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param text: the question to ask.
    :param default: the value returned when the user just presses enter.
    :param abort: when `True`, a negative answer raises :exc:`Abort`
                  instead of returning.
    :param prompt_suffix: a suffix appended to the prompt.
    :param show_default: whether the default is shown in the prompt.
    :param err: prompt on ``stderr`` instead of ``stdout``.
    """
    prompt = _build_prompt(
        text, prompt_suffix, show_default, "Y/n" if default else "y/N"
    )
    while True:
        try:
            # Emit the prompt through echo() first so colorama can handle
            # color codes on Windows.
            echo(prompt, nl=False, err=err)
            answer = visible_prompt_func("").lower().strip()
        except (KeyboardInterrupt, EOFError):
            raise Abort()
        replies = {"y": True, "yes": True, "n": False, "no": False, "": default}
        if answer in replies:
            rv = replies[answer]
            break
        echo("Error: invalid input", err=err)
    if abort and not rv:
        raise Abort()
    return rv
+
+
def get_terminal_size():
    """Return the current terminal dimensions as a ``(width, height)``
    tuple of columns and rows.
    """
    # Prefer shutil.get_terminal_size() where available (Python 3.3+).
    if sys.version_info >= (3, 3):
        import shutil

        shutil_get = getattr(shutil, "get_terminal_size", None)
        if shutil_get is not None:
            dims = shutil_get()
            return dims.columns, dims.lines

    if get_winterm_size is not None:
        winsize = get_winterm_size()
        # A (0, 0) answer means we are running inside a subprocess; fall
        # back to a sensible default instead of a zero-sized terminal.
        return (79, 24) if winsize == (0, 0) else winsize

    def _query_winsize(fd):
        # Ask the tty driver for the window size; None on any failure.
        try:
            import fcntl
            import termios

            return struct.unpack("hh", fcntl.ioctl(fd, termios.TIOCGWINSZ, "1234"))
        except Exception:
            return None

    rows_cols = _query_winsize(0) or _query_winsize(1) or _query_winsize(2)
    if not rows_cols:
        # stdin/stdout/stderr may all be redirected; try the controlling
        # terminal directly.
        try:
            tty_fd = os.open(os.ctermid(), os.O_RDONLY)
            try:
                rows_cols = _query_winsize(tty_fd)
            finally:
                os.close(tty_fd)
        except Exception:
            pass
    if not rows_cols or not rows_cols[0] or not rows_cols[1]:
        rows_cols = (
            os.environ.get("LINES", 25),
            os.environ.get("COLUMNS", DEFAULT_COLUMNS),
        )
    # rows_cols is (rows, columns); the public contract is (width, height).
    return int(rows_cols[1]), int(rows_cols[0])
+
+
def echo_via_pager(text_or_generator, color=None):
    """Page the given text on stdout using an environment-specific pager.

    .. versionchanged:: 3.0
       Added the `color` flag.

    :param text_or_generator: the text to page, or alternatively, a
                              generator emitting the text to page.
    :param color: controls if the pager supports ANSI colors or not.  The
                  default is autodetection.
    """
    color = resolve_color_default(color)

    if inspect.isgeneratorfunction(text_or_generator):
        source = text_or_generator()
    elif isinstance(text_or_generator, string_types):
        source = [text_or_generator]
    else:
        source = iter(text_or_generator)

    def _as_text(items):
        # Lazily coerce every element to a text type.
        for el in items:
            yield el if isinstance(el, string_types) else text_type(el)

    from ._termui_impl import pager

    # Chain a trailing newline so the pager output always ends cleanly.
    return pager(itertools.chain(_as_text(source), "\n"), color)
+
+
def progressbar(
    iterable=None,
    length=None,
    label=None,
    show_eta=True,
    show_percent=None,
    show_pos=False,
    item_show_func=None,
    fill_char="#",
    empty_char="-",
    bar_template="%(label)s  [%(bar)s]  %(info)s",
    info_sep="  ",
    width=36,
    file=None,
    color=None,
):
    """This function creates an iterable context manager that can be used
    to iterate over something while showing a progress bar.  It will
    either iterate over the `iterable` or `length` items (that are counted
    up).  While iteration happens, this function will print a rendered
    progress bar to the given `file` (defaults to stdout) and will attempt
    to calculate remaining time and more.  By default, this progress bar
    will not be rendered if the file is not a terminal.

    The context manager creates the progress bar.  When the context
    manager is entered the progress bar is already created.  With every
    iteration over the progress bar, the iterable passed to the bar is
    advanced and the bar is updated.  When the context manager exits,
    a newline is printed and the progress bar is finalized on screen.

    Note: The progress bar is currently designed for use cases where the
    total progress can be expected to take at least several seconds.
    Because of this, the ProgressBar class object won't display
    progress that is considered too fast, and progress where the time
    between steps is less than a second.

    No printing must happen or the progress bar will be unintentionally
    destroyed.

    Example usage::

        with progressbar(items) as bar:
            for item in bar:
                do_something_with(item)

    Alternatively, if no iterable is specified, one can manually update the
    progress bar through the `update()` method instead of directly
    iterating over the progress bar.  The update method accepts the number
    of steps to increment the bar with::

        with progressbar(length=chunks.total_bytes) as bar:
            for chunk in chunks:
                process_chunk(chunk)
                bar.update(chunks.bytes)

    .. versionadded:: 2.0

    .. versionadded:: 4.0
       Added the `color` parameter.  Added a `update` method to the
       progressbar object.

    :param iterable: an iterable to iterate over.  If not provided the length
                     is required.
    :param length: the number of items to iterate over.  By default the
                   progressbar will attempt to ask the iterator about its
                   length, which might or might not work.  If an iterable is
                   also provided this parameter can be used to override the
                   length.  If an iterable is not provided the progress bar
                   will iterate over a range of that length.
    :param label: the label to show next to the progress bar.
    :param show_eta: enables or disables the estimated time display.  This is
                     automatically disabled if the length cannot be
                     determined.
    :param show_percent: enables or disables the percentage display.  The
                         default is `True` if the iterable has a length or
                         `False` if not.
    :param show_pos: enables or disables the absolute position display.  The
                     default is `False`.
    :param item_show_func: a function called with the current item which
                           can return a string to show the current item
                           next to the progress bar.  Note that the current
                           item can be `None`!
    :param fill_char: the character to use to show the filled part of the
                      progress bar.
    :param empty_char: the character to use to show the non-filled part of
                       the progress bar.
    :param bar_template: the format string to use as template for the bar.
                         The parameters in it are ``label`` for the label,
                         ``bar`` for the progress bar and ``info`` for the
                         info section.
    :param info_sep: the separator between multiple info items (eta etc.)
    :param width: the width of the progress bar in characters, 0 means full
                  terminal width
    :param file: the file to write to.  If this is not a terminal then
                 only the label is printed.
    :param color: controls if the terminal supports ANSI colors or not.  The
                  default is autodetection.  This is only needed if ANSI
                  codes are included anywhere in the progress bar output
                  which is not the case by default.
    """
    from ._termui_impl import ProgressBar

    # Resolve the effective color setting once; all rendering is delegated
    # to the ProgressBar implementation class.
    color = resolve_color_default(color)
    return ProgressBar(
        iterable=iterable,
        length=length,
        show_eta=show_eta,
        show_percent=show_percent,
        show_pos=show_pos,
        item_show_func=item_show_func,
        fill_char=fill_char,
        empty_char=empty_char,
        bar_template=bar_template,
        info_sep=info_sep,
        file=file,
        label=label,
        width=width,
        color=color,
    )
+
+
def clear():
    """Clear the terminal screen and move the cursor to the top left.

    Does nothing when stdout is not connected to a terminal.

    .. versionadded:: 2.0
    """
    if not isatty(sys.stdout):
        return
    if WIN:
        # Without colorama there is no reliable escape-code support on
        # Windows, so shell out to the native command instead.
        os.system("cls")
        return
    sys.stdout.write("\033[2J\033[1;1H")
+
+
def style(
    text,
    fg=None,
    bg=None,
    bold=None,
    dim=None,
    underline=None,
    blink=None,
    reverse=None,
    reset=True,
):
    """Return ``text`` wrapped in ANSI escape codes for the given styles.

    By default a reset-all code is appended so the styling does not leak
    into subsequent output; pass ``reset=False`` to compose styles across
    several calls.

    Examples::

        click.echo(click.style('Hello World!', fg='green'))
        click.echo(click.style('ATTENTION!', blink=True))
        click.echo(click.style('Some things', reverse=True, fg='cyan'))

    Valid color names are the keys of the module-level ``_ansi_colors``
    table: the eight standard colors (``black``, ``red``, ``green``,
    ``yellow``, ``blue``, ``magenta``, ``cyan``, ``white``), their
    ``bright_`` variants, and ``reset`` (reset the color code only).
    Terminals may render some of them differently (e.g. ``black`` as
    gray, ``yellow`` as orange).

    .. versionadded:: 2.0

    .. versionadded:: 7.0
       Added support for bright colors.

    :param text: the string to style with ansi codes.
    :param fg: foreground color name.
    :param bg: background color name.
    :param bold: enable or disable bold mode.
    :param dim: enable or disable dim mode (badly supported).
    :param underline: enable or disable underline.
    :param blink: enable or disable blinking.
    :param reverse: enable or disable inverse rendering (foreground and
                    background swapped).
    :param reset: append a reset-all code at the end of the string.
    :raises TypeError: if an unknown color name is given.
    """
    parts = []

    def _color_sequence(name, offset):
        # Background codes are the foreground codes shifted by 10.
        try:
            return "\033[{}m".format(_ansi_colors[name] + offset)
        except KeyError:
            raise TypeError("Unknown color '{}'".format(name))

    if fg:
        parts.append(_color_sequence(fg, 0))
    if bg:
        parts.append(_color_sequence(bg, 10))

    # Each tri-state flag emits its "on" code when true and its explicit
    # "off" code when false; None leaves the attribute untouched.
    for flag, on_code, off_code in (
        (bold, 1, 22),
        (dim, 2, 22),
        (underline, 4, 24),
        (blink, 5, 25),
        (reverse, 7, 27),
    ):
        if flag is not None:
            parts.append("\033[{}m".format(on_code if flag else off_code))

    parts.append(text)
    if reset:
        parts.append(_ansi_reset_all)
    return "".join(parts)
+
+
def unstyle(text):
    """Strip ANSI styling sequences from ``text`` and return the result.

    Calling this directly is rarely necessary since Click's echo function
    already removes styling automatically when needed.

    .. versionadded:: 2.0

    :param text: the text to remove style information from.
    """
    return strip_ansi(text)
+
+
def secho(message=None, file=None, nl=True, err=False, color=None, **styles):
    """Shorthand combining :func:`style` and :func:`echo` in one call::

        click.secho('Hello World!', fg='green')
        click.echo(click.style('Hello World!', fg='green'))

    Both lines above are equivalent.  Keyword arguments are forwarded to
    whichever of the two underlying functions they belong to.

    .. versionadded:: 2.0
    """
    styled = message if message is None else style(message, **styles)
    return echo(styled, file=file, nl=nl, err=err, color=color)
+
+
def edit(
    text=None, editor=None, env=None, require_save=True, extension=".txt", filename=None
):
    r"""Open the given text (or file) in the user's editor.

    The editor is auto-detected unless ``editor`` is given (a full path
    to the executable; the regular operating system search path is used
    for finding it).  If the editor is closed without changes, `None` is
    returned.  When a file is edited directly (``filename`` given) the
    return value is always `None` and `require_save` and `extension` are
    ignored.

    If the editor cannot be opened a :exc:`UsageError` is raised.

    Note for Windows: to simplify cross-platform usage, the newlines are
    automatically converted from POSIX to Windows and vice versa.  As such,
    the message here will have ``\n`` as newline markers.

    :param text: the text to edit.
    :param editor: optionally the editor to use.  Defaults to automatic
                   detection.
    :param env: environment variables to forward to the editor.
    :param require_save: if this is true, then not saving in the editor
                         will make the return value become `None`.
    :param extension: the extension to tell the editor about.  This defaults
                      to `.txt` but changing this might change syntax
                      highlighting.
    :param filename: if provided it will edit this file instead of the
                     provided text contents.  It will not use a temporary
                     file as an indirection in that case.
    """
    from ._termui_impl import Editor

    ed = Editor(editor=editor, env=env, require_save=require_save, extension=extension)
    if filename is not None:
        ed.edit_file(filename)
        return None
    return ed.edit(text)
+
+
def launch(url, wait=False, locate=False):
    """Open ``url`` (or a filename) with the default viewer application
    for its file type.  For an executable this might start a new session.
    The return value is the exit code of the launched application, where
    ``0`` usually indicates success.

    Examples::

        click.launch('https://click.palletsprojects.com/')
        click.launch('/my/downloaded/file', locate=True)

    .. versionadded:: 2.0

    :param url: URL or filename of the thing to launch.
    :param wait: waits for the program to stop.
    :param locate: if this is set to `True` then instead of launching the
                   application associated with the URL it will attempt to
                   launch a file manager with the file located.  This
                   might have weird effects if the URL does not point to
                   the filesystem.
    """
    from ._termui_impl import open_url

    return open_url(url, wait=wait, locate=locate)
+
+
# If this is provided, getchar() calls into this instead.  This is used
# for unittesting purposes (the test runner swaps in a fake reader here).
_getchar = None
+
+
def getchar(echo=False):
    """Read a single character from the terminal and return it as unicode.

    Under rare circumstances more than one character may be returned,
    e.g. when multiple characters are already in the terminal buffer or
    when standard input is not actually a terminal.  This always reads
    from the terminal, even if something is piped into standard input.

    Note for Windows: in rare cases when typing non-ASCII characters, this
    function might wait for a second character and then return both at
    once, because certain Unicode characters look like special-key markers.

    .. versionadded:: 2.0

    :param echo: if set to `True`, the character read will also show up on
                 the terminal.  The default is to not show it.
    """
    # _getchar is a test hook; fall back to the real implementation.
    reader = _getchar
    if reader is None:
        from ._termui_impl import getchar as reader
    return reader(echo)
+
+
def raw_terminal():
    """Delegate to the raw-terminal helper in ``_termui_impl``."""
    from ._termui_impl import raw_terminal as impl

    return impl()
+
+
def pause(info="Press any key to continue ...", err=False):
    """Stop execution until the user presses any key, similar to the
    Windows batch ``pause`` command.  Does nothing when the program is
    not run through a terminal.

    .. versionadded:: 2.0

    .. versionadded:: 4.0
       Added the `err` parameter.

    :param info: the info string to print before pausing; pass a falsy
                 value to wait silently.
    :param err: if set to true the message goes to ``stderr`` instead of
                ``stdout``, the same as with echo.
    """
    interactive = isatty(sys.stdin) and isatty(sys.stdout)
    if not interactive:
        return
    try:
        if info:
            echo(info, nl=False, err=err)
        try:
            getchar()
        except (KeyboardInterrupt, EOFError):
            # An interrupt while waiting just ends the pause.
            pass
    finally:
        if info:
            echo(err=err)

+ 382 - 0
Lib/site-packages/click/testing.py

@@ -0,0 +1,382 @@
+import contextlib
+import os
+import shlex
+import shutil
+import sys
+import tempfile
+
+from . import formatting
+from . import termui
+from . import utils
+from ._compat import iteritems
+from ._compat import PY2
+from ._compat import string_types
+
+
+if PY2:
+    from cStringIO import StringIO
+else:
+    import io
+    from ._compat import _find_binary_reader
+
+
class EchoingStdin(object):
    """Wrap an input stream so that everything read from it is mirrored
    to an output stream.  Any attribute not overridden here is delegated
    to the wrapped input stream.
    """

    def __init__(self, input, output):
        self._input = input
        self._output = output

    def __getattr__(self, name):
        # Delegate everything we don't override to the wrapped stream.
        return getattr(self._input, name)

    def _echo(self, data):
        # Mirror the chunk to the output stream, then hand it back.
        self._output.write(data)
        return data

    def read(self, n=-1):
        return self._echo(self._input.read(n))

    def readline(self, n=-1):
        return self._echo(self._input.readline(n))

    def readlines(self):
        return [self._echo(line) for line in self._input.readlines()]

    def __iter__(self):
        return (self._echo(line) for line in self._input)

    def __repr__(self):
        return repr(self._input)
+
+
def make_input_stream(input, charset):
    """Coerce ``input`` (None, text, bytes, or a stream) into a byte
    stream suitable for use as stdin.
    """
    # Already a stream: on Python 3 we must unwrap to the binary reader.
    if hasattr(input, "read"):
        if PY2:
            return input
        binary = _find_binary_reader(input)
        if binary is None:
            raise TypeError("Could not find binary reader for input stream.")
        return binary

    if input is None:
        data = b""
    elif isinstance(input, bytes):
        data = input
    else:
        data = input.encode(charset)
    return StringIO(data) if PY2 else io.BytesIO(data)
+
+
class Result(object):
    """Holds the captured result of an invoked CLI script."""

    def __init__(
        self, runner, stdout_bytes, stderr_bytes, exit_code, exception, exc_info=None
    ):
        #: The runner that created the result
        self.runner = runner
        #: The standard output as bytes.
        self.stdout_bytes = stdout_bytes
        #: The standard error as bytes, or None if not available
        self.stderr_bytes = stderr_bytes
        #: The exit code as integer.
        self.exit_code = exit_code
        #: The exception that happened if one did.
        self.exception = exception
        #: The traceback
        self.exc_info = exc_info

    def _decode(self, data):
        # Decode with the runner's charset and normalize Windows line
        # endings so assertions are platform independent.
        return data.decode(self.runner.charset, "replace").replace("\r\n", "\n")

    @property
    def output(self):
        """The (standard) output as unicode string."""
        return self.stdout

    @property
    def stdout(self):
        """The standard output as unicode string."""
        return self._decode(self.stdout_bytes)

    @property
    def stderr(self):
        """The standard error as unicode string."""
        if self.stderr_bytes is None:
            raise ValueError("stderr not separately captured")
        return self._decode(self.stderr_bytes)

    def __repr__(self):
        status = repr(self.exception) if self.exception else "okay"
        return "<{} {}>".format(type(self).__name__, status)
+
+
+class CliRunner(object):
+    """The CLI runner provides functionality to invoke a Click command line
+    script for unittesting purposes in a isolated environment.  This only
+    works in single-threaded systems without any concurrency as it changes the
+    global interpreter state.
+
+    :param charset: the character set for the input and output data.  This is
+                    UTF-8 by default and should not be changed currently as
+                    the reporting to Click only works in Python 2 properly.
+    :param env: a dictionary with environment variables for overriding.
+    :param echo_stdin: if this is set to `True`, then reading from stdin writes
+                       to stdout.  This is useful for showing examples in
+                       some circumstances.  Note that regular prompts
+                       will automatically echo the input.
+    :param mix_stderr: if this is set to `False`, then stdout and stderr are
+                       preserved as independent streams.  This is useful for
+                       Unix-philosophy apps that have predictable stdout and
+                       noisy stderr, such that each may be measured
+                       independently
+    """
+
+    def __init__(self, charset=None, env=None, echo_stdin=False, mix_stderr=True):
+        if charset is None:
+            charset = "utf-8"
+        self.charset = charset
+        self.env = env or {}
+        self.echo_stdin = echo_stdin
+        self.mix_stderr = mix_stderr
+
+    def get_default_prog_name(self, cli):
+        """Given a command object it will return the default program name
+        for it.  The default is the `name` attribute or ``"root"`` if not
+        set.
+        """
+        return cli.name or "root"
+
+    def make_env(self, overrides=None):
+        """Returns the environment overrides for invoking a script."""
+        rv = dict(self.env)
+        if overrides:
+            rv.update(overrides)
+        return rv
+
+    @contextlib.contextmanager
+    def isolation(self, input=None, env=None, color=False):
+        """A context manager that sets up the isolation for invoking of a
+        command line tool.  This sets up stdin with the given input data
+        and `os.environ` with the overrides from the given dictionary.
+        This also rebinds some internals in Click to be mocked (like the
+        prompt functionality).
+
+        This is automatically done in the :meth:`invoke` method.
+
+        .. versionadded:: 4.0
+           The ``color`` parameter was added.
+
+        :param input: the input stream to put into sys.stdin.
+        :param env: the environment overrides as dictionary.
+        :param color: whether the output should contain color codes. The
+                      application can still override this explicitly.
+        """
+        input = make_input_stream(input, self.charset)
+
+        old_stdin = sys.stdin
+        old_stdout = sys.stdout
+        old_stderr = sys.stderr
+        old_forced_width = formatting.FORCED_WIDTH
+        formatting.FORCED_WIDTH = 80
+
+        env = self.make_env(env)
+
+        if PY2:
+            bytes_output = StringIO()
+            if self.echo_stdin:
+                input = EchoingStdin(input, bytes_output)
+            sys.stdout = bytes_output
+            if not self.mix_stderr:
+                bytes_error = StringIO()
+                sys.stderr = bytes_error
+        else:
+            bytes_output = io.BytesIO()
+            if self.echo_stdin:
+                input = EchoingStdin(input, bytes_output)
+            input = io.TextIOWrapper(input, encoding=self.charset)
+            sys.stdout = io.TextIOWrapper(bytes_output, encoding=self.charset)
+            if not self.mix_stderr:
+                bytes_error = io.BytesIO()
+                sys.stderr = io.TextIOWrapper(bytes_error, encoding=self.charset)
+
+        if self.mix_stderr:
+            sys.stderr = sys.stdout
+
+        sys.stdin = input
+
+        def visible_input(prompt=None):
+            sys.stdout.write(prompt or "")
+            val = input.readline().rstrip("\r\n")
+            sys.stdout.write("{}\n".format(val))
+            sys.stdout.flush()
+            return val
+
+        def hidden_input(prompt=None):
+            sys.stdout.write("{}\n".format(prompt or ""))
+            sys.stdout.flush()
+            return input.readline().rstrip("\r\n")
+
+        def _getchar(echo):
+            char = sys.stdin.read(1)
+            if echo:
+                sys.stdout.write(char)
+                sys.stdout.flush()
+            return char
+
+        default_color = color
+
+        def should_strip_ansi(stream=None, color=None):
+            if color is None:
+                return not default_color
+            return not color
+
+        old_visible_prompt_func = termui.visible_prompt_func
+        old_hidden_prompt_func = termui.hidden_prompt_func
+        old__getchar_func = termui._getchar
+        old_should_strip_ansi = utils.should_strip_ansi
+        termui.visible_prompt_func = visible_input
+        termui.hidden_prompt_func = hidden_input
+        termui._getchar = _getchar
+        utils.should_strip_ansi = should_strip_ansi
+
+        old_env = {}
+        try:
+            for key, value in iteritems(env):
+                old_env[key] = os.environ.get(key)
+                if value is None:
+                    try:
+                        del os.environ[key]
+                    except Exception:
+                        pass
+                else:
+                    os.environ[key] = value
+            yield (bytes_output, not self.mix_stderr and bytes_error)
+        finally:
+            for key, value in iteritems(old_env):
+                if value is None:
+                    try:
+                        del os.environ[key]
+                    except Exception:
+                        pass
+                else:
+                    os.environ[key] = value
+            sys.stdout = old_stdout
+            sys.stderr = old_stderr
+            sys.stdin = old_stdin
+            termui.visible_prompt_func = old_visible_prompt_func
+            termui.hidden_prompt_func = old_hidden_prompt_func
+            termui._getchar = old__getchar_func
+            utils.should_strip_ansi = old_should_strip_ansi
+            formatting.FORCED_WIDTH = old_forced_width
+
    def invoke(
        self,
        cli,
        args=None,
        input=None,
        env=None,
        catch_exceptions=True,
        color=False,
        **extra
    ):
        """Invokes a command in an isolated environment.  The arguments are
        forwarded directly to the command line script, the `extra` keyword
        arguments are passed to the :meth:`~clickpkg.Command.main` function of
        the command.

        This returns a :class:`Result` object.

        .. versionadded:: 3.0
           The ``catch_exceptions`` parameter was added.

        .. versionchanged:: 3.0
           The result object now has an `exc_info` attribute with the
           traceback if available.

        .. versionadded:: 4.0
           The ``color`` parameter was added.

        :param cli: the command to invoke
        :param args: the arguments to invoke. It may be given as an iterable
                     or a string. When given as string it will be interpreted
                     as a Unix shell command. More details at
                     :func:`shlex.split`.
        :param input: the input data for `sys.stdin`.
        :param env: the environment overrides.
        :param catch_exceptions: Whether to catch any other exceptions than
                                 ``SystemExit``.
        :param extra: the keyword arguments to pass to :meth:`main`.
        :param color: whether the output should contain color codes. The
                      application can still override this explicitly.
        """
        exc_info = None
        with self.isolation(input=input, env=env, color=color) as outstreams:
            exception = None
            exit_code = 0

            # A string command line is tokenized the way a Unix shell would.
            if isinstance(args, string_types):
                args = shlex.split(args)

            # ``prog_name`` is consumed here rather than forwarded to main().
            try:
                prog_name = extra.pop("prog_name")
            except KeyError:
                prog_name = self.get_default_prog_name(cli)

            try:
                cli.main(args=args or (), prog_name=prog_name, **extra)
            except SystemExit as e:
                # ``main`` normally terminates via sys.exit(); translate the
                # exit status into the Result fields.  ``None`` means success.
                exc_info = sys.exc_info()
                exit_code = e.code
                if exit_code is None:
                    exit_code = 0

                if exit_code != 0:
                    exception = e

                # sys.exit() accepts arbitrary objects; non-int codes are
                # echoed to the captured output and mapped to exit code 1.
                if not isinstance(exit_code, int):
                    sys.stdout.write(str(exit_code))
                    sys.stdout.write("\n")
                    exit_code = 1

            except Exception as e:
                if not catch_exceptions:
                    raise
                exception = e
                exit_code = 1
                exc_info = sys.exc_info()
            finally:
                # Flush and read the captured streams while still inside the
                # isolation context; isolation() restores the real streams
                # on exit.
                sys.stdout.flush()
                stdout = outstreams[0].getvalue()
                if self.mix_stderr:
                    stderr = None
                else:
                    stderr = outstreams[1].getvalue()

        return Result(
            runner=self,
            stdout_bytes=stdout,
            stderr_bytes=stderr,
            exit_code=exit_code,
            exception=exception,
            exc_info=exc_info,
        )
+
+    @contextlib.contextmanager
+    def isolated_filesystem(self):
+        """A context manager that creates a temporary folder and changes
+        the current working directory to it for isolated filesystem tests.
+        """
+        cwd = os.getcwd()
+        t = tempfile.mkdtemp()
+        os.chdir(t)
+        try:
+            yield t
+        finally:
+            os.chdir(cwd)
+            try:
+                shutil.rmtree(t)
+            except (OSError, IOError):  # noqa: B014
+                pass

+ 762 - 0
Lib/site-packages/click/types.py

@@ -0,0 +1,762 @@
+import os
+import stat
+from datetime import datetime
+
+from ._compat import _get_argv_encoding
+from ._compat import filename_to_ui
+from ._compat import get_filesystem_encoding
+from ._compat import get_streerror
+from ._compat import open_stream
+from ._compat import PY2
+from ._compat import text_type
+from .exceptions import BadParameter
+from .utils import LazyFile
+from .utils import safecall
+
+
class ParamType(object):
    """Helper for converting values through types.

    A valid type:

    *   has a ``name``
    *   passes ``None`` through unchanged
    *   converts from a string
    *   passes its own result type through unchanged (i.e. is idempotent)
    *   copes with ``param`` and ``ctx`` being ``None``, which happens
        when the type is used for prompt input
    """

    is_composite = False

    #: the descriptive name of this type
    name = None

    #: if a list of this type is expected and the value is pulled from a
    #: string environment variable, this is what splits it up.  ``None``
    #: means any whitespace.  For all parameters the general rule is that
    #: whitespace splits them up.  The exception are paths and files which
    #: are split by ``os.path.pathsep`` by default (":" on Unix and ";" on
    #: Windows).
    envvar_list_splitter = None

    def __call__(self, value, param=None, ctx=None):
        # ``None`` is the "missing" value and is never converted.
        if value is None:
            return None
        return self.convert(value, param, ctx)

    def get_metavar(self, param):
        """Returns the metavar default for this param if it provides one."""

    def get_missing_message(self, param):
        """Optionally might return extra information about a missing
        parameter.

        .. versionadded:: 2.0
        """

    def convert(self, value, param, ctx):
        """Convert *value*.  Never invoked for ``None`` (the missing
        value); base implementation is the identity.
        """
        return value

    def split_envvar_value(self, rv):
        """Split an environment-variable string into chunks using
        :attr:`envvar_list_splitter`.

        With the default splitter of ``None`` (any whitespace run),
        leading and trailing whitespace is ignored; an explicit splitter
        usually yields empty items at the edges instead.
        """
        text = rv or ""
        return text.split(self.envvar_list_splitter)

    def fail(self, message, param=None, ctx=None):
        """Helper method to fail with an invalid value message."""
        raise BadParameter(message, ctx=ctx, param=param)
+
+
class CompositeParamType(ParamType):
    """Base class for parameter types whose value spans several
    consecutive arguments.
    """

    is_composite = True

    @property
    def arity(self):
        """Number of values this composite type consumes; subclasses
        must override.
        """
        raise NotImplementedError()
+
+
class FuncParamType(ParamType):
    """Adapts an arbitrary callable into a parameter type; a raised
    ``ValueError`` is reported as an invalid value.
    """

    def __init__(self, func):
        # The callable's own name doubles as the type name.
        self.name = func.__name__
        self.func = func

    def convert(self, value, param, ctx):
        try:
            return self.func(value)
        except ValueError:
            # Build a text form of the offending value for the message.
            try:
                value = text_type(value)
            except UnicodeError:
                # Python 2 fallback for byte strings that are not valid text.
                value = str(value).decode("utf-8", "replace")
            self.fail(value, param, ctx)
+
+
class UnprocessedParamType(ParamType):
    """A type that performs no conversion whatsoever."""

    name = "text"

    def convert(self, value, param, ctx):
        # Intentionally a no-op: the raw value is returned untouched.
        return value

    def __repr__(self):
        return "UNPROCESSED"
+
+
class StringParamType(ParamType):
    """The implicit default type: yields text, decoding byte input with
    the argv encoding, then the filesystem encoding, then UTF-8 with
    replacement characters.
    """

    name = "text"

    def convert(self, value, param, ctx):
        if not isinstance(value, bytes):
            return value
        enc = _get_argv_encoding()
        try:
            return value.decode(enc)
        except UnicodeError:
            pass
        fs_enc = get_filesystem_encoding()
        if fs_enc != enc:
            try:
                return value.decode(fs_enc)
            except UnicodeError:
                pass
        # Last resort: never fail, substitute undecodable bytes.
        return value.decode("utf-8", "replace")

    def __repr__(self):
        return "STRING"
+
+
class Choice(ParamType):
    """The choice type allows a value to be checked against a fixed set
    of supported values. All of these values have to be strings.

    You should only pass a list or tuple of choices. Other iterables
    (like generators) may lead to surprising results.

    The resulting value will always be one of the originally passed choices
    regardless of ``case_sensitive`` or any ``ctx.token_normalize_func``
    being specified.

    See :ref:`choice-opts` for an example.

    :param case_sensitive: Set to false to make choices case
        insensitive. Defaults to true.
    """

    name = "choice"

    def __init__(self, choices, case_sensitive=True):
        # Choices are kept in their originally supplied form; convert()
        # always returns one of these verbatim.
        self.choices = choices
        self.case_sensitive = case_sensitive

    def get_metavar(self, param):
        return "[{}]".format("|".join(self.choices))

    def get_missing_message(self, param):
        return "Choose from:\n\t{}.".format(",\n\t".join(self.choices))

    def convert(self, value, param, ctx):
        # Match through normalization and case sensitivity
        # first do token_normalize_func, then lowercase
        # preserve original `value` to produce an accurate message in
        # `self.fail`
        normed_value = value
        # Map normalized form -> originally supplied choice.
        normed_choices = {choice: choice for choice in self.choices}

        if ctx is not None and ctx.token_normalize_func is not None:
            normed_value = ctx.token_normalize_func(value)
            normed_choices = {
                ctx.token_normalize_func(normed_choice): original
                for normed_choice, original in normed_choices.items()
            }

        if not self.case_sensitive:
            if PY2:
                lower = str.lower
            else:
                # casefold is the Python 3 way to do caseless matching
                # (handles more than ASCII lower()).
                lower = str.casefold

            normed_value = lower(normed_value)
            normed_choices = {
                lower(normed_choice): original
                for normed_choice, original in normed_choices.items()
            }

        if normed_value in normed_choices:
            return normed_choices[normed_value]

        # Report with the un-normalized value the user actually typed.
        self.fail(
            "invalid choice: {}. (choose from {})".format(
                value, ", ".join(self.choices)
            ),
            param,
            ctx,
        )

    def __repr__(self):
        return "Choice('{}')".format(list(self.choices))
+
+
class DateTime(ParamType):
    """The DateTime type converts date strings into `datetime` objects.

    The format strings which are checked are configurable, but default to some
    common (non-timezone aware) ISO 8601 formats.

    When specifying *DateTime* formats, you should only pass a list or a tuple.
    Other iterables, like generators, may lead to surprising results.

    The format strings are processed using ``datetime.strptime``, and this
    consequently defines the format strings which are allowed.

    Parsing is tried using each format, in order, and the first format which
    parses successfully is used.

    :param formats: A list or tuple of date format strings, in the order in
                    which they should be tried. Defaults to
                    ``'%Y-%m-%d'``, ``'%Y-%m-%dT%H:%M:%S'``,
                    ``'%Y-%m-%d %H:%M:%S'``.
    """

    name = "datetime"

    def __init__(self, formats=None):
        self.formats = formats or ["%Y-%m-%d", "%Y-%m-%dT%H:%M:%S", "%Y-%m-%d %H:%M:%S"]

    def get_metavar(self, param):
        return "[{}]".format("|".join(self.formats))

    def _try_to_convert_date(self, value, format):
        """Return the parsed ``datetime``, or ``None`` if *format* does
        not match *value*."""
        try:
            return datetime.strptime(value, format)
        except ValueError:
            return None

    def convert(self, value, param, ctx):
        # The first format that parses wins.
        for format in self.formats:
            dtime = self._try_to_convert_date(value, format)
            if dtime is not None:
                return dtime

        # Bug fix: pass ``param`` and ``ctx`` through to fail() so the
        # error is attributed to the failing parameter instead of raising
        # a generic, parameter-less BadParameter.
        self.fail(
            "invalid datetime format: {}. (choose from {})".format(
                value, ", ".join(self.formats)
            ),
            param,
            ctx,
        )

    def __repr__(self):
        return "DateTime"
+
+
class IntParamType(ParamType):
    """Integer conversion type (exposed as :data:`click.INT`)."""

    name = "integer"

    def convert(self, value, param, ctx):
        try:
            return int(value)
        except ValueError:
            self.fail("{} is not a valid integer".format(value), param, ctx)

    def __repr__(self):
        return "INT"
+
+
class IntRange(IntParamType):
    """A parameter that works similar to :data:`click.INT` but restricts
    the value to fit into a range.  The default behavior is to fail if the
    value falls outside the range, but it can also be silently clamped
    between the two edges.

    See :ref:`ranges` for an example.
    """

    name = "integer range"

    def __init__(self, min=None, max=None, clamp=False):
        # ``None`` for min/max means that edge is unbounded.
        self.min = min
        self.max = max
        self.clamp = clamp

    def convert(self, value, param, ctx):
        # Parse as a plain integer first; parse failures are reported by
        # the base class.
        rv = IntParamType.convert(self, value, param, ctx)
        if self.clamp:
            # Clamping silently snaps out-of-range values to the nearest
            # bounded edge instead of failing.
            if self.min is not None and rv < self.min:
                return self.min
            if self.max is not None and rv > self.max:
                return self.max
        if (
            self.min is not None
            and rv < self.min
            or self.max is not None
            and rv > self.max
        ):
            # Out of range and not clamped: tailor the message to whether
            # one or both edges are bounded.
            if self.min is None:
                self.fail(
                    "{} is bigger than the maximum valid value {}.".format(
                        rv, self.max
                    ),
                    param,
                    ctx,
                )
            elif self.max is None:
                self.fail(
                    "{} is smaller than the minimum valid value {}.".format(
                        rv, self.min
                    ),
                    param,
                    ctx,
                )
            else:
                self.fail(
                    "{} is not in the valid range of {} to {}.".format(
                        rv, self.min, self.max
                    ),
                    param,
                    ctx,
                )
        return rv

    def __repr__(self):
        return "IntRange({}, {})".format(self.min, self.max)
+
+
class FloatParamType(ParamType):
    """Floating point conversion type (exposed as :data:`click.FLOAT`)."""

    name = "float"

    def convert(self, value, param, ctx):
        try:
            return float(value)
        except ValueError:
            self.fail(
                "{} is not a valid floating point value".format(value), param, ctx
            )

    def __repr__(self):
        return "FLOAT"
+
+
class FloatRange(FloatParamType):
    """A parameter that works similar to :data:`click.FLOAT` but restricts
    the value to fit into a range.  The default behavior is to fail if the
    value falls outside the range, but it can also be silently clamped
    between the two edges.

    See :ref:`ranges` for an example.
    """

    name = "float range"

    def __init__(self, min=None, max=None, clamp=False):
        # ``None`` for min/max means that edge is unbounded.
        self.min = min
        self.max = max
        self.clamp = clamp

    def convert(self, value, param, ctx):
        # Parse as a plain float first; parse failures are reported by
        # the base class.
        rv = FloatParamType.convert(self, value, param, ctx)
        if self.clamp:
            # Clamping silently snaps out-of-range values to the nearest
            # bounded edge instead of failing.
            if self.min is not None and rv < self.min:
                return self.min
            if self.max is not None and rv > self.max:
                return self.max
        if (
            self.min is not None
            and rv < self.min
            or self.max is not None
            and rv > self.max
        ):
            # Out of range and not clamped: tailor the message to whether
            # one or both edges are bounded.
            if self.min is None:
                self.fail(
                    "{} is bigger than the maximum valid value {}.".format(
                        rv, self.max
                    ),
                    param,
                    ctx,
                )
            elif self.max is None:
                self.fail(
                    "{} is smaller than the minimum valid value {}.".format(
                        rv, self.min
                    ),
                    param,
                    ctx,
                )
            else:
                self.fail(
                    "{} is not in the valid range of {} to {}.".format(
                        rv, self.min, self.max
                    ),
                    param,
                    ctx,
                )
        return rv

    def __repr__(self):
        return "FloatRange({}, {})".format(self.min, self.max)
+
+
class BoolParamType(ParamType):
    """Boolean conversion type accepting the usual textual spellings
    ("true"/"false", "t"/"f", "1"/"0", "yes"/"no", "y"/"n"),
    case-insensitively.
    """

    name = "boolean"

    def convert(self, value, param, ctx):
        # Real booleans pass straight through.
        if isinstance(value, bool):
            return value
        normalized = value.lower()
        if normalized in ("true", "t", "1", "yes", "y"):
            return True
        if normalized in ("false", "f", "0", "no", "n"):
            return False
        self.fail("{} is not a valid boolean".format(normalized), param, ctx)

    def __repr__(self):
        return "BOOL"
+
+
class UUIDParameterType(ParamType):
    """Parameter type that parses values into :class:`uuid.UUID` objects."""

    name = "uuid"

    def convert(self, value, param, ctx):
        # Imported lazily; most CLI runs never need uuid.
        import uuid

        try:
            if PY2 and isinstance(value, text_type):
                # uuid.UUID on Python 2 wants a native (byte) string.  The
                # encode stays inside the try: UnicodeEncodeError is a
                # ValueError subclass and must also produce fail().
                value = value.encode("ascii")
            return uuid.UUID(value)
        except ValueError:
            self.fail("{} is not a valid UUID value".format(value), param, ctx)

    def __repr__(self):
        return "UUID"
+
+
class File(ParamType):
    """Declares a parameter to be a file for reading or writing.  The file
    is automatically closed once the context tears down (after the command
    finished working).

    Files can be opened for reading or writing.  The special value ``-``
    indicates stdin or stdout depending on the mode.

    By default, the file is opened for reading text data, but it can also be
    opened in binary mode or for writing.  The encoding parameter can be used
    to force a specific encoding.

    The `lazy` flag controls if the file should be opened immediately or upon
    first IO. The default is to be non-lazy for standard input and output
    streams as well as files opened for reading, `lazy` otherwise. When opening a
    file lazily for reading, it is still opened temporarily for validation, but
    will not be held open until first IO. lazy is mainly useful when opening
    for writing to avoid creating the file until it is needed.

    Starting with Click 2.0, files can also be opened atomically in which
    case all writes go into a separate file in the same folder and upon
    completion the file will be moved over to the original location.  This
    is useful if a file regularly read by other users is modified.

    See :ref:`file-args` for more information.
    """

    name = "filename"
    envvar_list_splitter = os.path.pathsep

    def __init__(
        self, mode="r", encoding=None, errors="strict", lazy=None, atomic=False
    ):
        self.mode = mode
        self.encoding = encoding
        self.errors = errors
        # ``None`` means "decide per value" in :meth:`resolve_lazy_flag`.
        self.lazy = lazy
        self.atomic = atomic

    def resolve_lazy_flag(self, value):
        """Decide whether *value* should be opened lazily: an explicit
        ``lazy`` setting wins; otherwise ``-`` (std stream) is eager and
        write modes are lazy.
        """
        if self.lazy is not None:
            return self.lazy
        if value == "-":
            return False
        elif "w" in self.mode:
            return True
        return False

    def convert(self, value, param, ctx):
        try:
            # Already an open file-like object: hand it back untouched.
            if hasattr(value, "read") or hasattr(value, "write"):
                return value

            lazy = self.resolve_lazy_flag(value)

            if lazy:
                f = LazyFile(
                    value, self.mode, self.encoding, self.errors, atomic=self.atomic
                )
                # Close the (possibly never-opened) lazy file when the
                # context tears down.
                if ctx is not None:
                    ctx.call_on_close(f.close_intelligently)
                return f

            f, should_close = open_stream(
                value, self.mode, self.encoding, self.errors, atomic=self.atomic
            )
            # If a context is provided, we automatically close the file
            # at the end of the context execution (or flush out).  If a
            # context does not exist, it's the caller's responsibility to
            # properly close the file.  This for instance happens when the
            # type is used with prompts.
            if ctx is not None:
                if should_close:
                    ctx.call_on_close(safecall(f.close))
                else:
                    # Std streams are not closed, only flushed.
                    ctx.call_on_close(safecall(f.flush))
            return f
        except (IOError, OSError) as e:  # noqa: B014
            self.fail(
                "Could not open file: {}: {}".format(
                    filename_to_ui(value), get_streerror(e)
                ),
                param,
                ctx,
            )
+
+
class Path(ParamType):
    """The path type is similar to the :class:`File` type but it performs
    different checks.  First of all, instead of returning an open file
    handle it returns just the filename.  Secondly, it can perform various
    basic checks about what the file or directory should be.

    .. versionchanged:: 6.0
       `allow_dash` was added.

    :param exists: if set to true, the file or directory needs to exist for
                   this value to be valid.  If this is not required and a
                   file does indeed not exist, then all further checks are
                   silently skipped.
    :param file_okay: controls if a file is a possible value.
    :param dir_okay: controls if a directory is a possible value.
    :param writable: if true, a writable check is performed.
    :param readable: if true, a readable check is performed.
    :param resolve_path: if this is true, then the path is fully resolved
                         before the value is passed onwards.  This means
                         that it's absolute and symlinks are resolved.  It
                         will not expand a tilde-prefix, as this is
                         supposed to be done by the shell only.
    :param allow_dash: If this is set to `True`, a single dash to indicate
                       standard streams is permitted.
    :param path_type: optionally a string type that should be used to
                      represent the path.  The default is `None` which
                      means the return value will be either bytes or
                      unicode depending on what makes most sense given the
                      input data Click deals with.
    """

    envvar_list_splitter = os.path.pathsep

    def __init__(
        self,
        exists=False,
        file_okay=True,
        dir_okay=True,
        writable=False,
        readable=True,
        resolve_path=False,
        allow_dash=False,
        path_type=None,
    ):
        self.exists = exists
        self.file_okay = file_okay
        self.dir_okay = dir_okay
        self.writable = writable
        self.readable = readable
        self.resolve_path = resolve_path
        self.allow_dash = allow_dash
        self.type = path_type

        # Derive the human-readable noun used in error messages from what
        # kinds of paths are acceptable.
        if self.file_okay and not self.dir_okay:
            self.name = "file"
            self.path_type = "File"
        elif self.dir_okay and not self.file_okay:
            self.name = "directory"
            self.path_type = "Directory"
        else:
            self.name = "path"
            self.path_type = "Path"

    def coerce_path_result(self, rv):
        """Convert the result to ``self.type`` (bytes or text) if an
        explicit ``path_type`` was requested."""
        if self.type is not None and not isinstance(rv, self.type):
            if self.type is text_type:
                rv = rv.decode(get_filesystem_encoding())
            else:
                rv = rv.encode(get_filesystem_encoding())
        return rv

    def convert(self, value, param, ctx):
        rv = value

        # ``-`` stands for a standard stream and skips all filesystem
        # checks (only when files and dashes are both allowed).
        is_dash = self.file_okay and self.allow_dash and rv in (b"-", "-")

        if not is_dash:
            if self.resolve_path:
                rv = os.path.realpath(rv)

            try:
                st = os.stat(rv)
            except OSError:
                # Path does not exist: fine unless existence is required;
                # all further checks are skipped in that case.
                if not self.exists:
                    return self.coerce_path_result(rv)
                self.fail(
                    "{} '{}' does not exist.".format(
                        self.path_type, filename_to_ui(value)
                    ),
                    param,
                    ctx,
                )

            if not self.file_okay and stat.S_ISREG(st.st_mode):
                self.fail(
                    "{} '{}' is a file.".format(self.path_type, filename_to_ui(value)),
                    param,
                    ctx,
                )
            if not self.dir_okay and stat.S_ISDIR(st.st_mode):
                self.fail(
                    "{} '{}' is a directory.".format(
                        self.path_type, filename_to_ui(value)
                    ),
                    param,
                    ctx,
                )
            # NOTE(review): the access checks below use the original
            # ``value`` even when resolve_path rewrote ``rv`` — confirm
            # this is intentional.
            if self.writable and not os.access(value, os.W_OK):
                self.fail(
                    "{} '{}' is not writable.".format(
                        self.path_type, filename_to_ui(value)
                    ),
                    param,
                    ctx,
                )
            if self.readable and not os.access(value, os.R_OK):
                self.fail(
                    "{} '{}' is not readable.".format(
                        self.path_type, filename_to_ui(value)
                    ),
                    param,
                    ctx,
                )

        return self.coerce_path_result(rv)
+
+
class Tuple(CompositeParamType):
    """Composite type that applies a distinct type to each of a fixed
    number of values.  Useful when ``nargs`` is a fixed count and the
    items have different types; selected automatically when a Python
    tuple literal is used as a type.

    For more information see :ref:`tuple-type`.

    :param types: a list of types that should be used for the tuple items.
    """

    def __init__(self, types):
        self.types = [convert_type(t) for t in types]

    @property
    def name(self):
        return "<{}>".format(" ".join(t.name for t in self.types))

    @property
    def arity(self):
        return len(self.types)

    def convert(self, value, param, ctx):
        if len(value) != len(self.types):
            raise TypeError(
                "It would appear that nargs is set to conflict with the"
                " composite type arity."
            )
        converted = [t(v, param, ctx) for t, v in zip(self.types, value)]
        return tuple(converted)
+
+
def convert_type(ty, default=None):
    """Converts a callable or python type into the most appropriate
    param type.

    :param ty: a type, callable, tuple of types, :class:`ParamType`
               instance, or ``None`` (infer from *default*).
    :param default: the parameter default used for inference when *ty*
                    is ``None``.
    """
    guessed_type = False
    # No explicit type: infer one from the default value.  A tuple
    # default maps element-wise to a composite Tuple type.
    if ty is None and default is not None:
        if isinstance(default, tuple):
            ty = tuple(map(type, default))
        else:
            ty = type(default)
        guessed_type = True

    # Order matters below: composite tuple first, then pre-built
    # ParamType instances, then the concrete builtins.
    if isinstance(ty, tuple):
        return Tuple(ty)
    if isinstance(ty, ParamType):
        return ty
    if ty is text_type or ty is str or ty is None:
        return STRING
    if ty is int:
        return INT
    # Booleans are only okay if not guessed.  This is done because for
    # flags the default value is actually a bit of a lie in that it
    # indicates which of the flags is the one we want.  See get_default()
    # for more information.
    if ty is bool and not guessed_type:
        return BOOL
    if ty is float:
        return FLOAT
    if guessed_type:
        return STRING

    # Catch a common mistake
    if __debug__:
        try:
            if issubclass(ty, ParamType):
                raise AssertionError(
                    "Attempted to use an uninstantiated parameter type ({}).".format(ty)
                )
        except TypeError:
            # ``ty`` is not a class at all; fall through to wrapping the
            # callable below.
            pass
    return FuncParamType(ty)
+
+
#: A dummy parameter type that just does nothing.  From a user's
#: perspective this appears to just be the same as `STRING` but internally
#: no string conversion takes place.  This is necessary to achieve the
#: same bytes/unicode behavior on Python 2/3 in situations where you want
#: to not convert argument types.  This is usually useful when working
#: with file paths as they can appear in bytes and unicode.
#:
#: For path related uses the :class:`Path` type is a better choice but
#: there are situations where an unprocessed type is useful which is why
#: it is provided.
#:
#: .. versionadded:: 4.0
UNPROCESSED = UnprocessedParamType()

#: A unicode string parameter type which is the implicit default.  This
#: can also be selected by using ``str`` as type.
STRING = StringParamType()

#: An integer parameter.  This can also be selected by using ``int`` as
#: type.
INT = IntParamType()

#: A floating point value parameter.  This can also be selected by using
#: ``float`` as type.
FLOAT = FloatParamType()

#: A boolean parameter.  This is the default for boolean flags.  This can
#: also be selected by using ``bool`` as a type.
BOOL = BoolParamType()

#: A UUID parameter.
UUID = UUIDParameterType()

+ 455 - 0
Lib/site-packages/click/utils.py

@@ -0,0 +1,455 @@
+import os
+import sys
+
+from ._compat import _default_text_stderr
+from ._compat import _default_text_stdout
+from ._compat import auto_wrap_for_ansi
+from ._compat import binary_streams
+from ._compat import filename_to_ui
+from ._compat import get_filesystem_encoding
+from ._compat import get_streerror
+from ._compat import is_bytes
+from ._compat import open_stream
+from ._compat import PY2
+from ._compat import should_strip_ansi
+from ._compat import string_types
+from ._compat import strip_ansi
+from ._compat import text_streams
+from ._compat import text_type
+from ._compat import WIN
+from .globals import resolve_color_default
+
+if not PY2:
+    from ._compat import _find_binary_writer
+elif WIN:
+    from ._winconsole import _get_windows_argv
+    from ._winconsole import _hash_py_argv
+    from ._winconsole import _initial_argv_hash
+
+echo_native_types = string_types + (bytes, bytearray)
+
+
+def _posixify(name):
+    return "-".join(name.split()).lower()
+
+
def safecall(func):
    """Wraps a function so that it swallows exceptions."""

    def _guarded(*args, **kwargs):
        try:
            return func(*args, **kwargs)
        except Exception:
            # Deliberately best-effort: any failure simply yields None.
            return None

    return _guarded
+
+
def make_str(value):
    """Converts a value into a valid string."""
    if not isinstance(value, bytes):
        return text_type(value)
    # Bytes are decoded with the filesystem encoding first, falling back
    # to UTF-8 with replacement characters on failure.
    try:
        return value.decode(get_filesystem_encoding())
    except UnicodeError:
        return value.decode("utf-8", "replace")
+
+
def make_default_short_help(help, max_length=45):
    """Return a condensed version of help string.

    Words are taken from *help* until either the first sentence ends
    (a word ending in ``.``) or *max_length* characters would be
    exceeded, in which case ``...`` is appended instead.
    """
    pieces = []
    used = 0

    for word in help.split():
        # Cost of this word plus the separating space (if any).
        cost = len(word) + (1 if pieces else 0)
        if used + cost > max_length:
            pieces.append("...")
            break
        if pieces:
            pieces.append(" ")
        pieces.append(word)
        if word.endswith("."):
            # Stop at the end of the first sentence.
            break
        used += cost

    return "".join(pieces)
+
+
class LazyFile(object):
    """A lazy file works like a regular file but it does not fully open
    the file but it does perform some basic checks early to see if the
    filename parameter does make sense.  This is useful for safely opening
    files for writing.
    """

    def __init__(
        self, filename, mode="r", encoding=None, errors="strict", atomic=False
    ):
        # Arguments for the eventual open() are stored so the real open
        # can be deferred until first use.
        self.name = filename
        self.mode = mode
        self.encoding = encoding
        self.errors = errors
        self.atomic = atomic

        if filename == "-":
            # Standard streams are resolved immediately; open_stream also
            # reports whether closing them is our responsibility.
            self._f, self.should_close = open_stream(filename, mode, encoding, errors)
        else:
            if "r" in mode:
                # Open and close the file in case we're opening it for
                # reading so that we can catch at least some errors in
                # some cases early.
                open(filename, mode).close()
            self._f = None
            self.should_close = True

    def __getattr__(self, name):
        # Any attribute access forces the file open and proxies to the
        # real file object.
        return getattr(self.open(), name)

    def __repr__(self):
        if self._f is not None:
            return repr(self._f)
        return "<unopened file '{}' {}>".format(self.name, self.mode)

    def open(self):
        """Opens the file if it's not yet open.  This call might fail with
        a :exc:`FileError`.  Not handling this error will produce an error
        that Click shows.
        """
        if self._f is not None:
            return self._f
        try:
            rv, self.should_close = open_stream(
                self.name, self.mode, self.encoding, self.errors, atomic=self.atomic
            )
        except (IOError, OSError) as e:  # noqa: E402
            # Imported lazily to avoid a circular import with exceptions.
            from .exceptions import FileError

            raise FileError(self.name, hint=get_streerror(e))
        self._f = rv
        return rv

    def close(self):
        """Closes the underlying file, no matter what."""
        if self._f is not None:
            self._f.close()

    def close_intelligently(self):
        """This function only closes the file if it was opened by the lazy
        file wrapper.  For instance this will never close stdin.
        """
        if self.should_close:
            self.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Only closes files we own; standard streams are left open.
        self.close_intelligently()

    def __iter__(self):
        # Iterating forces the file open first.
        self.open()
        return iter(self._f)
+
+
class KeepOpenFile(object):
    """Proxy around an open file whose context manager never closes it.

    Used to hand out standard streams through ``with`` blocks without
    accidentally closing them; every attribute access and iteration is
    forwarded to the wrapped file object.
    """

    def __init__(self, file):
        self._stream = file

    def __getattr__(self, name):
        return getattr(self._stream, name)

    def __iter__(self):
        return iter(self._stream)

    def __repr__(self):
        return repr(self._stream)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, tb):
        # Intentionally do not close the underlying file.
        pass
+
+
def echo(message=None, file=None, nl=True, err=False, color=None):
    """Prints a message plus a newline to the given file or stdout.  On
    first sight, this looks like the print function, but it has improved
    support for handling Unicode and binary data that does not fail no
    matter how badly configured the system is.

    Primarily it means that you can print binary data as well as Unicode
    data on both 2.x and 3.x to the given file in the most appropriate way
    possible.  This is a very carefree function in that it will try its
    best to not fail.  As of Click 6.0 this includes support for unicode
    output on the Windows console.

    In addition to that, if `colorama`_ is installed, the echo function will
    also support clever handling of ANSI codes.  Essentially it will then
    do the following:

    -   add transparent handling of ANSI color codes on Windows.
    -   hide ANSI codes automatically if the destination file is not a
        terminal.

    .. _colorama: https://pypi.org/project/colorama/

    .. versionchanged:: 6.0
       As of Click 6.0 the echo function will properly support unicode
       output on the windows console.  Note that click does not modify
       the interpreter in any way which means that `sys.stdout` or the
       print statement or function will still not provide unicode support.

    .. versionchanged:: 2.0
       Starting with version 2.0 of Click, the echo function will work
       with colorama if it's installed.

    .. versionadded:: 3.0
       The `err` parameter was added.

    .. versionchanged:: 4.0
       Added the `color` flag.

    :param message: the message to print
    :param file: the file to write to (defaults to ``stdout``)
    :param err: if set to true the file defaults to ``stderr`` instead of
                ``stdout``.  This is faster and easier than calling
                :func:`get_text_stderr` yourself.
    :param nl: if set to `True` (the default) a newline is printed afterwards.
    :param color: controls if the terminal supports ANSI colors or not.  The
                  default is autodetection.
    """
    if file is None:
        if err:
            file = _default_text_stderr()
        else:
            file = _default_text_stdout()

    # Convert non bytes/text into the native string type.
    if message is not None and not isinstance(message, echo_native_types):
        message = text_type(message)

    if nl:
        # Append the newline in the same type (text vs. bytes) as the
        # message so the payload stays homogeneous.
        message = message or u""
        if isinstance(message, text_type):
            message += u"\n"
        else:
            message += b"\n"

    # If there is a message, and we're in Python 3, and the value looks
    # like bytes, we manually need to find the binary stream and write the
    # message in there.  This is done separately so that most stream
    # types will work as you would expect.  Eg: you can write to StringIO
    # for other cases.
    if message and not PY2 and is_bytes(message):
        binary_file = _find_binary_writer(file)
        if binary_file is not None:
            file.flush()
            binary_file.write(message)
            binary_file.flush()
            return

    # ANSI-style support.  If there is no message or we are dealing with
    # bytes nothing is happening.  If we are connected to a file we want
    # to strip colors.  If we are on windows we either wrap the stream
    # to strip the color or we use the colorama support to translate the
    # ansi codes to API calls.
    if message and not is_bytes(message):
        color = resolve_color_default(color)
        if should_strip_ansi(file, color):
            message = strip_ansi(message)
        elif WIN:
            if auto_wrap_for_ansi is not None:
                file = auto_wrap_for_ansi(file)
            elif not color:
                message = strip_ansi(message)

    if message:
        file.write(message)
    file.flush()
+
+
def get_binary_stream(name):
    """Returns a system stream for byte processing.  This essentially
    returns the stream from the sys module with the given name but it
    solves some compatibility issues between different Python versions.
    Primarily this function is necessary for getting binary streams on
    Python 3.

    :param name: the name of the stream to open.  Valid names are ``'stdin'``,
                 ``'stdout'`` and ``'stderr'``
    """
    if name not in binary_streams:
        raise TypeError("Unknown standard stream '{}'".format(name))
    return binary_streams[name]()
+
+
def get_text_stream(name, encoding=None, errors="strict"):
    """Returns a system stream for text processing.  This usually returns
    a wrapped stream around a binary stream returned from
    :func:`get_binary_stream` but it also can take shortcuts on Python 3
    for already correctly configured streams.

    :param name: the name of the stream to open.  Valid names are ``'stdin'``,
                 ``'stdout'`` and ``'stderr'``
    :param encoding: overrides the detected default encoding.
    :param errors: overrides the default error mode.
    """
    if name not in text_streams:
        raise TypeError("Unknown standard stream '{}'".format(name))
    return text_streams[name](encoding, errors)
+
+
def open_file(
    filename, mode="r", encoding=None, errors="strict", lazy=False, atomic=False
):
    """This is similar to how the :class:`File` works but for manual
    usage.  Files are opened non lazy by default.  This can open regular
    files as well as stdin/stdout if ``'-'`` is passed.

    If stdin/stdout is returned the stream is wrapped so that the context
    manager will not close the stream accidentally.  This makes it possible
    to always use the function like this without having to worry to
    accidentally close a standard stream::

        with open_file(filename) as f:
            ...

    .. versionadded:: 3.0

    :param filename: the name of the file to open (or ``'-'`` for stdin/stdout).
    :param mode: the mode in which to open the file.
    :param encoding: the encoding to use.
    :param errors: the error handling for this file.
    :param lazy: can be flipped to true to open the file lazily.
    :param atomic: in atomic mode writes go into a temporary file and it's
                   moved on close.
    """
    if lazy:
        return LazyFile(filename, mode, encoding, errors, atomic=atomic)
    stream, should_close = open_stream(filename, mode, encoding, errors, atomic=atomic)
    if should_close:
        return stream
    # Standard streams are wrapped so a context manager cannot close them.
    return KeepOpenFile(stream)
+
+
def get_os_args():
    """This returns the argument part of ``sys.argv``, i.e.
    ``sys.argv[1:]``, in the most appropriate form for Click to process.

    On most environments the return value is simply ``sys.argv[1:]``.
    However if you are on Windows and running Python 2 the return value
    will actually be a list of unicode strings instead because the
    default behavior on that platform otherwise will not be able to
    carry all possible values that sys.argv can have.

    .. versionadded:: 6.0
    """
    if not (PY2 and WIN):
        return sys.argv[1:]
    # We can only extract the unicode argv if sys.argv has not been
    # changed since the startup of the application.
    if _initial_argv_hash != _hash_py_argv():
        return sys.argv[1:]
    return _get_windows_argv()
+
+
def format_filename(filename, shorten=False):
    """Formats a filename for user display.  The main purpose of this
    function is to ensure that the filename can be displayed at all.  This
    will decode the filename to unicode if necessary in a way that it will
    not fail.  Optionally, it can shorten the filename to not include the
    full path to the filename.

    :param filename: formats a filename for UI display.  This will also convert
                     the filename into unicode without failing.
    :param shorten: this optionally shortens the filename to strip of the
                    path that leads up to it.
    """
    shown = os.path.basename(filename) if shorten else filename
    return filename_to_ui(shown)
+
+
def get_app_dir(app_name, roaming=True, force_posix=False):
    r"""Returns the config folder for the application.  The default behavior
    is to return whatever is most appropriate for the operating system.

    To give you an idea, for an app called ``"Foo Bar"``, something like
    the following folders could be returned:

    Mac OS X:
      ``~/Library/Application Support/Foo Bar``
    Mac OS X (POSIX):
      ``~/.foo-bar``
    Unix:
      ``~/.config/foo-bar``
    Unix (POSIX):
      ``~/.foo-bar``
    Win XP (roaming):
      ``C:\Documents and Settings\<user>\Local Settings\Application Data\Foo Bar``
    Win XP (not roaming):
      ``C:\Documents and Settings\<user>\Application Data\Foo Bar``
    Win 7 (roaming):
      ``C:\Users\<user>\AppData\Roaming\Foo Bar``
    Win 7 (not roaming):
      ``C:\Users\<user>\AppData\Local\Foo Bar``

    .. versionadded:: 2.0

    :param app_name: the application name.  This should be properly capitalized
                     and can contain whitespace.
    :param roaming: controls if the folder should be roaming or not on Windows.
                    Has no effect otherwise.
    :param force_posix: if this is set to `True` then on any POSIX system the
                        folder will be stored in the home folder with a leading
                        dot instead of the XDG config home or darwin's
                        application support folder.
    """
    if WIN:
        env_key = "APPDATA" if roaming else "LOCALAPPDATA"
        base = os.environ.get(env_key)
        if base is None:
            # Fall back to the home directory if the env var is unset.
            base = os.path.expanduser("~")
        return os.path.join(base, app_name)
    if force_posix:
        return os.path.join(os.path.expanduser("~/.{}".format(_posixify(app_name))))
    if sys.platform == "darwin":
        support = os.path.expanduser("~/Library/Application Support")
        return os.path.join(support, app_name)
    # XDG base-directory convention for other POSIX systems.
    config_home = os.environ.get("XDG_CONFIG_HOME", os.path.expanduser("~/.config"))
    return os.path.join(config_home, _posixify(app_name))
+
+
class PacifyFlushWrapper(object):
    """Proxy that makes ``.flush()`` safe on a broken pipe.

    During interpreter shutdown/final GC ``sys.stdout`` and ``sys.stderr``
    are always flushed; if the underlying pipe is already broken that
    flush raises.  This wrapper swallows the resulting ``EPIPE`` error
    while re-raising any other I/O error, and proxies every other call
    and attribute to the wrapped file so it has minimal impact on other
    cleanup code.
    """

    def __init__(self, wrapped):
        self.wrapped = wrapped

    def flush(self):
        import errno

        try:
            self.wrapped.flush()
        except IOError as exc:
            # Only a broken pipe is silenced; anything else propagates.
            if exc.errno != errno.EPIPE:
                raise

    def __getattr__(self, attr):
        return getattr(self.wrapped, attr)

+ 5 - 0
Lib/site-packages/easy_install.py

@@ -0,0 +1,5 @@
"""Run the EasyInstall command"""

if __name__ == '__main__':
    # Imported lazily so merely importing this module has no side effects;
    # delegates straight to setuptools' easy_install entry point.
    from setuptools.command.easy_install import main
    main()

+ 1 - 0
Lib/site-packages/future-0.18.2.dist-info/INSTALLER

@@ -0,0 +1 @@
+pip

+ 19 - 0
Lib/site-packages/future-0.18.2.dist-info/LICENSE.txt

@@ -0,0 +1,19 @@
+Copyright (c) 2013-2019 Python Charmers Pty Ltd, Australia
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.

+ 109 - 0
Lib/site-packages/future-0.18.2.dist-info/METADATA

@@ -0,0 +1,109 @@
+Metadata-Version: 2.1
+Name: future
+Version: 0.18.2
+Summary: Clean single-source support for Python 3 and 2
+Home-page: https://python-future.org
+Author: Ed Schofield
+Author-email: ed@pythoncharmers.com
+License: MIT
+Keywords: future past python3 migration futurize backport six 2to3 modernize pasteurize 3to2
+Platform: UNKNOWN
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+Classifier: Programming Language :: Python :: 3.6
+Classifier: Programming Language :: Python :: 3.7
+Classifier: License :: OSI Approved
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Requires-Python: >=2.6, !=3.0.*, !=3.1.*, !=3.2.*
+
+
+future: Easy, safe support for Python 2/3 compatibility
+=======================================================
+
+``future`` is the missing compatibility layer between Python 2 and Python
+3. It allows you to use a single, clean Python 3.x-compatible codebase to
+support both Python 2 and Python 3 with minimal overhead.
+
+It is designed to be used as follows::
+
+    from __future__ import (absolute_import, division,
+                            print_function, unicode_literals)
+    from builtins import (
+             bytes, dict, int, list, object, range, str,
+             ascii, chr, hex, input, next, oct, open,
+             pow, round, super,
+             filter, map, zip)
+
+followed by predominantly standard, idiomatic Python 3 code that then runs
+similarly on Python 2.6/2.7 and Python 3.3+.
+
+The imports have no effect on Python 3. On Python 2, they shadow the
+corresponding builtins, which normally have different semantics on Python 3
+versus 2, to provide their Python 3 semantics.
+
+
+Standard library reorganization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``future`` supports the standard library reorganization (PEP 3108) through the
+following Py3 interfaces:
+
+    >>> # Top-level packages with Py3 names provided on Py2:
+    >>> import html.parser
+    >>> import queue
+    >>> import tkinter.dialog
+    >>> import xmlrpc.client
+    >>> # etc.
+
+    >>> # Aliases provided for extensions to existing Py2 module names:
+    >>> from future.standard_library import install_aliases
+    >>> install_aliases()
+
+    >>> from collections import Counter, OrderedDict   # backported to Py2.6
+    >>> from collections import UserDict, UserList, UserString
+    >>> import urllib.request
+    >>> from itertools import filterfalse, zip_longest
+    >>> from subprocess import getoutput, getstatusoutput
+
+
+Automatic conversion
+--------------------
+
+An included script called `futurize
+<http://python-future.org/automatic_conversion.html>`_ aids in converting
+code (from either Python 2 or Python 3) to code compatible with both
+platforms. It is similar to ``python-modernize`` but goes further in
+providing Python 3 compatibility through the use of the backported types
+and builtin functions in ``future``.
+
+
+Documentation
+-------------
+
+See: http://python-future.org
+
+
+Credits
+-------
+
+:Author:  Ed Schofield, Jordan M. Adler, et al
+:Sponsor: Python Charmers Pty Ltd, Australia, and Python Charmers Pte
+          Ltd, Singapore. http://pythoncharmers.com
+:Others:  See docs/credits.rst or http://python-future.org/credits.html
+
+
+Licensing
+---------
+Copyright 2013-2019 Python Charmers Pty Ltd, Australia.
+The software is distributed under an MIT licence. See LICENSE.txt.
+
+
+

+ 415 - 0
Lib/site-packages/future-0.18.2.dist-info/RECORD

@@ -0,0 +1,415 @@
+../../Scripts/futurize.exe,sha256=ubMJDDHO-vUcjbuZ7LxQ8M4_IEvSrb0ACChaO8CujPg,106371
+../../Scripts/pasteurize.exe,sha256=sY8QOkw4OQBJxN16KCmrBP1U-HWR7a5CFp-eZ4KP1yM,106373
+future-0.18.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
+future-0.18.2.dist-info/LICENSE.txt,sha256=kW5WE5LUhHG5wjQ39W4mUvMgyzsRnOqhYu30EBb3Rrk,1083
+future-0.18.2.dist-info/METADATA,sha256=fkY-mhLBh40f490kVFZ3hkvu2OVGdLIp5x-oJpqF91k,3703
+future-0.18.2.dist-info/RECORD,,
+future-0.18.2.dist-info/WHEEL,sha256=YUYzQ6UQdoqxXjimOitTqynltBCkwY6qlTfTh2IzqQU,97
+future-0.18.2.dist-info/entry_points.txt,sha256=-ATQtLUC2gkzrCYqc1Twac093xrI164NuMwsRALJWnM,89
+future-0.18.2.dist-info/top_level.txt,sha256=DT0C3az2gb-uJaj-fs0h4WwHYlJVDp0EvLdud1y5Zyw,38
+future/__init__.py,sha256=TsDq1XoGk6Jfach_rEhwAi07zR5OKYZ6hhUlG5Bj6Ag,2991
+future/__pycache__/__init__.cpython-38.pyc,,
+future/backports/__init__.py,sha256=5QXvQ_jc5Xx6p4dSaHnZXPZazBEunKDKhbUjxZ0XD1I,530
+future/backports/__pycache__/__init__.cpython-38.pyc,,
+future/backports/__pycache__/_markupbase.cpython-38.pyc,,
+future/backports/__pycache__/datetime.cpython-38.pyc,,
+future/backports/__pycache__/misc.cpython-38.pyc,,
+future/backports/__pycache__/socket.cpython-38.pyc,,
+future/backports/__pycache__/socketserver.cpython-38.pyc,,
+future/backports/__pycache__/total_ordering.cpython-38.pyc,,
+future/backports/_markupbase.py,sha256=MDPTCykLq4J7Aea3PvYotATEE0CG4R_SjlxfJaLXTJM,16215
+future/backports/datetime.py,sha256=I214Vu0cRY8mi8J5aIcsAyQJnWmOKXeLV-QTWSn7VQU,75552
+future/backports/email/__init__.py,sha256=eH3AJr3FkuBy_D6yS1V2K76Q2CQ93y2zmAMWmn8FbHI,2269
+future/backports/email/__pycache__/__init__.cpython-38.pyc,,
+future/backports/email/__pycache__/_encoded_words.cpython-38.pyc,,
+future/backports/email/__pycache__/_header_value_parser.cpython-38.pyc,,
+future/backports/email/__pycache__/_parseaddr.cpython-38.pyc,,
+future/backports/email/__pycache__/_policybase.cpython-38.pyc,,
+future/backports/email/__pycache__/base64mime.cpython-38.pyc,,
+future/backports/email/__pycache__/charset.cpython-38.pyc,,
+future/backports/email/__pycache__/encoders.cpython-38.pyc,,
+future/backports/email/__pycache__/errors.cpython-38.pyc,,
+future/backports/email/__pycache__/feedparser.cpython-38.pyc,,
+future/backports/email/__pycache__/generator.cpython-38.pyc,,
+future/backports/email/__pycache__/header.cpython-38.pyc,,
+future/backports/email/__pycache__/headerregistry.cpython-38.pyc,,
+future/backports/email/__pycache__/iterators.cpython-38.pyc,,
+future/backports/email/__pycache__/message.cpython-38.pyc,,
+future/backports/email/__pycache__/parser.cpython-38.pyc,,
+future/backports/email/__pycache__/policy.cpython-38.pyc,,
+future/backports/email/__pycache__/quoprimime.cpython-38.pyc,,
+future/backports/email/__pycache__/utils.cpython-38.pyc,,
+future/backports/email/_encoded_words.py,sha256=m1vTRfxAQdg4VyWO7PF-1ih1mmq97V-BPyHHkuEwSME,8443
+future/backports/email/_header_value_parser.py,sha256=cj_1ce1voLn8H98r9cKqiSLgfFSxCv3_UL3sSvjqgjk,104692
+future/backports/email/_parseaddr.py,sha256=KewEnos0YDM-SYX503z7E1MmVbG5VRaKjxjcl0Ipjbs,17389
+future/backports/email/_policybase.py,sha256=2lJD9xouiz4uHvWGQ6j1nwlwWVQGwwzpy5JZoeQqhUc,14647
+future/backports/email/base64mime.py,sha256=sey6iJA9pHIOdFgoV1p7QAwYVjt8CEkDhITt304-nyI,3729
+future/backports/email/charset.py,sha256=CfE4iV2zAq6MQC0CHXHLnwTNW71zmhNITbzOcfxE4vY,17439
+future/backports/email/encoders.py,sha256=Nn4Pcx1rOdRgoSIzB6T5RWHl5zxClbf32wgE6D0tUt8,2800
+future/backports/email/errors.py,sha256=tRX8PP5g7mk2bAxL1jTCYrbfhD2gPZFNrh4_GJRM8OQ,3680
+future/backports/email/feedparser.py,sha256=bvmhb4cdY-ipextPK2K2sDgMsNvTspmuQfYyCxc4zSc,22736
+future/backports/email/generator.py,sha256=lpaLhZHneguvZ2QgRu7Figkjb7zmY28AGhj9iZTdI7s,19520
+future/backports/email/header.py,sha256=uBHbNKO-yx5I9KBflernJpyy3fX4gImCB1QE7ICApLs,24448
+future/backports/email/headerregistry.py,sha256=ZPbvLKXD0NMLSU4jXlVHfGyGcLMrFm-GQVURu_XHj88,20637
+future/backports/email/iterators.py,sha256=kMRYFGy3SVVpo7HG7JJr2ZAlOoaX6CVPzKYwDSvLfV0,2348
+future/backports/email/message.py,sha256=I6WW5cZDza7uwLOGJSvsDhGZC9K_Q570Lk2gt_vDUXM,35237
+future/backports/email/mime/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+future/backports/email/mime/__pycache__/__init__.cpython-38.pyc,,
+future/backports/email/mime/__pycache__/application.cpython-38.pyc,,
+future/backports/email/mime/__pycache__/audio.cpython-38.pyc,,
+future/backports/email/mime/__pycache__/base.cpython-38.pyc,,
+future/backports/email/mime/__pycache__/image.cpython-38.pyc,,
+future/backports/email/mime/__pycache__/message.cpython-38.pyc,,
+future/backports/email/mime/__pycache__/multipart.cpython-38.pyc,,
+future/backports/email/mime/__pycache__/nonmultipart.cpython-38.pyc,,
+future/backports/email/mime/__pycache__/text.cpython-38.pyc,,
+future/backports/email/mime/application.py,sha256=m-5a4mSxu2E32XAImnp9x9eMVX5Vme2iNgn2dMMNyss,1401
+future/backports/email/mime/audio.py,sha256=2ognalFRadcsUYQYMUZbjv5i1xJbFhQN643doMuI7M4,2815
+future/backports/email/mime/base.py,sha256=wV3ClQyMsOqmkXSXbk_wd_zPoPTvBx8kAIzq3rdM4lE,875
+future/backports/email/mime/image.py,sha256=DpQk1sB-IMmO43AF4uadsXyf_y5TdEzJLfyhqR48bIw,1907
+future/backports/email/mime/message.py,sha256=pFsMhXW07aRjsLq1peO847PApWFAl28-Z2Z7BP1Dn74,1429
+future/backports/email/mime/multipart.py,sha256=j4Lf_sJmuwTbfgdQ6R35_t1_ha2DynJBJDvpjwbNObE,1699
+future/backports/email/mime/nonmultipart.py,sha256=Ciba1Z8d2yLDDpxgDJuk3Bb-TqcpE9HCd8KfbW5vgl4,832
+future/backports/email/mime/text.py,sha256=zV98BjoR4S_nX8c47x43LnsnifeGhIfNGwSAh575bs0,1552
+future/backports/email/parser.py,sha256=-115SC3DHZ6lLijWFTxuOnE-GiM2BOYaUSz-QpmvYSo,5312
+future/backports/email/policy.py,sha256=gpcbhVRXuCohkK6MUqopTs1lv4E4-ZVUO6OVncoGEJE,8823
+future/backports/email/quoprimime.py,sha256=w93W5XgdFpyGaDqDBJrnXF_v_npH5r20WuAxmrAzyQg,10923
+future/backports/email/utils.py,sha256=vpfN0E8UjNbNw-2NFBQGCo4TNgrghMsqzpEYW5C_fBs,14270
+future/backports/html/__init__.py,sha256=FKwqFtWMCoGNkhU97OPnR1fZSh6etAKfN1FU1KvXcV8,924
+future/backports/html/__pycache__/__init__.cpython-38.pyc,,
+future/backports/html/__pycache__/entities.cpython-38.pyc,,
+future/backports/html/__pycache__/parser.cpython-38.pyc,,
+future/backports/html/entities.py,sha256=kzoRnQyGk_3DgoucHLhL5QL1pglK9nvmxhPIGZFDTnc,75428
+future/backports/html/parser.py,sha256=G2tUObvbHSotNt06JLY-BP1swaZNfDYFd_ENWDjPmRg,19770
+future/backports/http/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+future/backports/http/__pycache__/__init__.cpython-38.pyc,,
+future/backports/http/__pycache__/client.cpython-38.pyc,,
+future/backports/http/__pycache__/cookiejar.cpython-38.pyc,,
+future/backports/http/__pycache__/cookies.cpython-38.pyc,,
+future/backports/http/__pycache__/server.cpython-38.pyc,,
+future/backports/http/client.py,sha256=76EbhEZOtvdHFcU-jrjivoff13oQ9IMbdkZEdf5kQzQ,47602
+future/backports/http/cookiejar.py,sha256=_Vy4BPT-h0ZT0R_utGQAFXzuOAdmU9KedGFffyX9wN4,76559
+future/backports/http/cookies.py,sha256=DsyDUGDEbCXAA9Jq6suswSc76uSZqUu39adDDNj8XGw,21581
+future/backports/http/server.py,sha256=1CaMxgzHf9lYhmTJyE7topgjRIlIn9cnjgw8YEvwJV4,45523
+future/backports/misc.py,sha256=AkbED6BdHKnYCmIAontT4zHKTqdPPfJfn35HIs6LDrg,32682
+future/backports/socket.py,sha256=DH1V6IjKPpJ0tln8bYvxvQ7qnvZG-UoQtMA5yVleHiU,15663
+future/backports/socketserver.py,sha256=Twvyk5FqVnOeiNcbVsyMDPTF1mNlkKfyofG7tKxTdD8,24286
+future/backports/test/__init__.py,sha256=9dXxIZnkI095YfHC-XIaVF6d31GjeY1Ag8TEzcFgepM,264
+future/backports/test/__pycache__/__init__.cpython-38.pyc,,
+future/backports/test/__pycache__/pystone.cpython-38.pyc,,
+future/backports/test/__pycache__/ssl_servers.cpython-38.pyc,,
+future/backports/test/__pycache__/support.cpython-38.pyc,,
+future/backports/test/badcert.pem,sha256=JioQeRZkHH8hGsWJjAF3U1zQvcWqhyzG6IOEJpTY9SE,1928
+future/backports/test/badkey.pem,sha256=gaBK9px_gG7DmrLKxfD6f6i-toAmARBTVfs-YGFRQF0,2162
+future/backports/test/dh512.pem,sha256=dUTsjtLbK-femrorUrTGF8qvLjhTiT_n4Uo5V6u__Gs,402
+future/backports/test/https_svn_python_org_root.pem,sha256=wOB3Onnc62Iu9kEFd8GcHhd_suucYjpJNA3jyfHeJWA,2569
+future/backports/test/keycert.passwd.pem,sha256=ZBfnVLpbBtAOf_2gCdiQ-yrBHmRsNzSf8VC3UpQZIjg,1830
+future/backports/test/keycert.pem,sha256=xPXi5idPcQVbrhgxBqF2TNGm6sSZ2aLVVEt6DWzplL8,1783
+future/backports/test/keycert2.pem,sha256=DB46FEAYv8BWwQJ-5RzC696FxPN7CON-Qsi-R4poJgc,1795
+future/backports/test/nokia.pem,sha256=s00x0uPDSaa5DHJ_CwzlVhg3OVdJ47f4zgqQdd0SAfQ,1923
+future/backports/test/nullbytecert.pem,sha256=NFRYWhmP_qT3jGfVjR6-iaC-EQdhIFjiXtTLN5ZPKnE,5435
+future/backports/test/nullcert.pem,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+future/backports/test/pystone.py,sha256=fvyoJ_tVovTNaxbJmdJMwr9F6SngY-U4ibULnd_wUqA,7427
+future/backports/test/sha256.pem,sha256=3wB-GQqEc7jq-PYwYAQaPbtTvvr7stk_DVmZxFgehfA,8344
+future/backports/test/ssl_cert.pem,sha256=M607jJNeIeHG9BlTf_jaQkPJI4nOxSJPn-zmEAaW43M,867
+future/backports/test/ssl_key.passwd.pem,sha256=I_WH4sBw9Vs9Z-BvmuXY0aw8tx8avv6rm5UL4S_pP00,963
+future/backports/test/ssl_key.pem,sha256=VKGU-R3UYaZpVTXl7chWl4vEYEDeob69SfvRTQ8aq_4,916
+future/backports/test/ssl_servers.py,sha256=-pd7HMZljuZfFRAbCAiAP_2G04orITJFj-S9ddr6o84,7209
+future/backports/test/support.py,sha256=zJrb-pz-Wu2dZwnNodg1v3w96zVq7ORuN-hOGOHbdA8,70881
+future/backports/total_ordering.py,sha256=O3M57_IisQ-zW5hW20uxkfk4fTGsr0EF2tAKx3BksQo,1929
+future/backports/urllib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+future/backports/urllib/__pycache__/__init__.cpython-38.pyc,,
+future/backports/urllib/__pycache__/error.cpython-38.pyc,,
+future/backports/urllib/__pycache__/parse.cpython-38.pyc,,
+future/backports/urllib/__pycache__/request.cpython-38.pyc,,
+future/backports/urllib/__pycache__/response.cpython-38.pyc,,
+future/backports/urllib/__pycache__/robotparser.cpython-38.pyc,,
+future/backports/urllib/error.py,sha256=ktikuK9ag4lS4f8Z0k5p1F11qF40N2AiOtjbXiF97ew,2715
+future/backports/urllib/parse.py,sha256=67avrYqV1UK7i_22goRUrvJ8SffzjRdTja9wzq_ynXY,35792
+future/backports/urllib/request.py,sha256=aR9ZMzfhV1C2Qk3wFsGvkwxqtdPTdsJVGRt5DUCwgJ8,96276
+future/backports/urllib/response.py,sha256=ooQyswwbb-9N6IVi1Kwjss1aR-Kvm8ZNezoyVEonp8c,3180
+future/backports/urllib/robotparser.py,sha256=pnAGTbKhdbCq_9yMZp7m8hj5q_NJpyQX6oQIZuYcnkw,6865
+future/backports/xmlrpc/__init__.py,sha256=h61ciVTdVvu8oEUXv4dHf_Tc5XUXDH3RKB1-8fQhSsg,38
+future/backports/xmlrpc/__pycache__/__init__.cpython-38.pyc,,
+future/backports/xmlrpc/__pycache__/client.cpython-38.pyc,,
+future/backports/xmlrpc/__pycache__/server.cpython-38.pyc,,
+future/backports/xmlrpc/client.py,sha256=6a6Pvx_RVC9gIHDkFOVdREeGaZckOOiWd7T6GyzU3qU,48133
+future/backports/xmlrpc/server.py,sha256=W_RW5hgYbNV2LGbnvngzm7akacRdK-XFY-Cy2HL-qsY,37285
+future/builtins/__init__.py,sha256=jSdOucWfCsfkfTR8Jd4-Ls-YQpJ0AnzUomBxgwuoxNs,1687
+future/builtins/__pycache__/__init__.cpython-38.pyc,,
+future/builtins/__pycache__/disabled.cpython-38.pyc,,
+future/builtins/__pycache__/iterators.cpython-38.pyc,,
+future/builtins/__pycache__/misc.cpython-38.pyc,,
+future/builtins/__pycache__/new_min_max.cpython-38.pyc,,
+future/builtins/__pycache__/newnext.cpython-38.pyc,,
+future/builtins/__pycache__/newround.cpython-38.pyc,,
+future/builtins/__pycache__/newsuper.cpython-38.pyc,,
+future/builtins/disabled.py,sha256=Ysq74bsmwntpq7dzkwTAD7IHKrkXy66vJlPshVwgVBI,2109
+future/builtins/iterators.py,sha256=l1Zawm2x82oqOuGGtCZRE76Ej98sMlHQwu9fZLK5RrA,1396
+future/builtins/misc.py,sha256=hctlKKWUyN0Eoodxg4ySQHEqARTukOLR4L5K5c6PW9k,4550
+future/builtins/new_min_max.py,sha256=7qQ4iiG4GDgRzjPzzzmg9pdby35Mtt6xNOOsyqHnIGY,1757
+future/builtins/newnext.py,sha256=oxXB8baXqJv29YG40aCS9UXk9zObyoOjya8BJ7NdBJM,2009
+future/builtins/newround.py,sha256=l2EXPAFU3fAsZigJxUH6x66B7jhNaB076-L5FR617R8,3181
+future/builtins/newsuper.py,sha256=LmiUQ_f6NXDIz6v6sDPkoTWl-2Zccy7PpZfQKYtscac,4146
+future/moves/__init__.py,sha256=MsAW69Xp_fqUo4xODufcKM6AZf-ozHaz44WPZdsDFJA,220
+future/moves/__pycache__/__init__.cpython-38.pyc,,
+future/moves/__pycache__/_dummy_thread.cpython-38.pyc,,
+future/moves/__pycache__/_markupbase.cpython-38.pyc,,
+future/moves/__pycache__/_thread.cpython-38.pyc,,
+future/moves/__pycache__/builtins.cpython-38.pyc,,
+future/moves/__pycache__/collections.cpython-38.pyc,,
+future/moves/__pycache__/configparser.cpython-38.pyc,,
+future/moves/__pycache__/copyreg.cpython-38.pyc,,
+future/moves/__pycache__/itertools.cpython-38.pyc,,
+future/moves/__pycache__/pickle.cpython-38.pyc,,
+future/moves/__pycache__/queue.cpython-38.pyc,,
+future/moves/__pycache__/reprlib.cpython-38.pyc,,
+future/moves/__pycache__/socketserver.cpython-38.pyc,,
+future/moves/__pycache__/subprocess.cpython-38.pyc,,
+future/moves/__pycache__/sys.cpython-38.pyc,,
+future/moves/__pycache__/winreg.cpython-38.pyc,,
+future/moves/_dummy_thread.py,sha256=c8ZRUd8ffvyvGKGGgve5NKc8VdtAWquu8-4FnO2EdvA,175
+future/moves/_markupbase.py,sha256=W9wh_Gu3jDAMIhVBV1ZnCkJwYLHRk_v_su_HLALBkZQ,171
+future/moves/_thread.py,sha256=rwY7L4BZMFPlrp_i6T2Un4_iKYwnrXJ-yV6FJZN8YDo,163
+future/moves/builtins.py,sha256=4sjjKiylecJeL9da_RaBZjdymX2jtMs84oA9lCqb4Ug,281
+future/moves/collections.py,sha256=OKQ-TfUgms_2bnZRn4hrclLDoiN2i-HSWcjs3BC2iY8,417
+future/moves/configparser.py,sha256=TNy226uCbljjU-DjAVo7j7Effbj5zxXvDh0SdXehbzk,146
+future/moves/copyreg.py,sha256=Y3UjLXIMSOxZggXtvZucE9yv4tkKZtVan45z8eix4sU,438
+future/moves/dbm/__init__.py,sha256=_VkvQHC2UcIgZFPRroiX_P0Fs7HNqS_69flR0-oq2B8,488
+future/moves/dbm/__pycache__/__init__.cpython-38.pyc,,
+future/moves/dbm/__pycache__/dumb.cpython-38.pyc,,
+future/moves/dbm/__pycache__/gnu.cpython-38.pyc,,
+future/moves/dbm/__pycache__/ndbm.cpython-38.pyc,,
+future/moves/dbm/dumb.py,sha256=HKdjjtO3EyP9EKi1Hgxh_eUU6yCQ0fBX9NN3n-zb8JE,166
+future/moves/dbm/gnu.py,sha256=XoCSEpZ2QaOgo2h1m80GW7NUgj_b93BKtbcuwgtnaKo,162
+future/moves/dbm/ndbm.py,sha256=OFnreyo_1YHDBl5YUm9gCzKlN1MHgWbfSQAZVls2jaM,162
+future/moves/html/__init__.py,sha256=BSUFSHxXf2kGvHozlnrB1nn6bPE6p4PpN3DwA_Z5geo,1016
+future/moves/html/__pycache__/__init__.cpython-38.pyc,,
+future/moves/html/__pycache__/entities.cpython-38.pyc,,
+future/moves/html/__pycache__/parser.cpython-38.pyc,,
+future/moves/html/entities.py,sha256=lVvchdjK_RzRj759eg4RMvGWHfgBbj0tKGOoZ8dbRyY,177
+future/moves/html/parser.py,sha256=V2XpHLKLCxQum3N9xlO3IUccAD7BIykZMqdEcWET3vY,167
+future/moves/http/__init__.py,sha256=Mx1v_Tcks4udHCtDM8q2xnYUiQ01gD7EpPyeQwsP3-Q,71
+future/moves/http/__pycache__/__init__.cpython-38.pyc,,
+future/moves/http/__pycache__/client.cpython-38.pyc,,
+future/moves/http/__pycache__/cookiejar.cpython-38.pyc,,
+future/moves/http/__pycache__/cookies.cpython-38.pyc,,
+future/moves/http/__pycache__/server.cpython-38.pyc,,
+future/moves/http/client.py,sha256=hqEBq7GDXZidd1AscKnSyjSoMcuj8rERqGTmD7VheDQ,165
+future/moves/http/cookiejar.py,sha256=Frr9ZZCg-145ymy0VGpiPJhvBEpJtVqRBYPaKhgT1Z4,173
+future/moves/http/cookies.py,sha256=PPrHa1_oDbu3D_BhJGc6PvMgY1KoxyYq1jqeJwEcMvE,233
+future/moves/http/server.py,sha256=8YQlSCShjAsB5rr5foVvZgp3IzwYFvTmGZCHhBSDtaI,606
+future/moves/itertools.py,sha256=PVxFHRlBQl9ElS0cuGFPcUtj53eHX7Z1DmggzGfgQ6c,158
+future/moves/pickle.py,sha256=r8j9skzfE8ZCeHyh_OB-WucOkRTIHN7zpRM7l7V3qS4,229
+future/moves/queue.py,sha256=uxvLCChF-zxWWgrY1a_wxt8rp2jILdwO4PrnkBW6VTE,160
+future/moves/reprlib.py,sha256=Nt5sUgMQ3jeVIukqSHOvB0UIsl6Y5t-mmT_13mpZmiY,161
+future/moves/socketserver.py,sha256=v8ZLurDxHOgsubYm1iefjlpnnJQcx2VuRUGt9FCJB9k,174
+future/moves/subprocess.py,sha256=oqRSMfFZkxM4MXkt3oD5N6eBwmmJ6rQ9KPhvSQKT_hM,251
+future/moves/sys.py,sha256=HOMRX4Loim75FMbWawd3oEwuGNJR-ClMREEFkVpBsRs,132
+future/moves/test/__init__.py,sha256=yB9F-fDQpzu1v8cBoKgIrL2ScUNqjlkqEztYrGVCQ-0,110
+future/moves/test/__pycache__/__init__.cpython-38.pyc,,
+future/moves/test/__pycache__/support.cpython-38.pyc,,
+future/moves/test/support.py,sha256=6zGgTTXcERyBJIQ04-X-sAe781tVgLVHp3HzmQPy52g,259
+future/moves/tkinter/__init__.py,sha256=jV9vDx3wRl0bsoclU8oSe-5SqHQ3YpCbStmqtXnq1p4,620
+future/moves/tkinter/__pycache__/__init__.cpython-38.pyc,,
+future/moves/tkinter/__pycache__/colorchooser.cpython-38.pyc,,
+future/moves/tkinter/__pycache__/commondialog.cpython-38.pyc,,
+future/moves/tkinter/__pycache__/constants.cpython-38.pyc,,
+future/moves/tkinter/__pycache__/dialog.cpython-38.pyc,,
+future/moves/tkinter/__pycache__/dnd.cpython-38.pyc,,
+future/moves/tkinter/__pycache__/filedialog.cpython-38.pyc,,
+future/moves/tkinter/__pycache__/font.cpython-38.pyc,,
+future/moves/tkinter/__pycache__/messagebox.cpython-38.pyc,,
+future/moves/tkinter/__pycache__/scrolledtext.cpython-38.pyc,,
+future/moves/tkinter/__pycache__/simpledialog.cpython-38.pyc,,
+future/moves/tkinter/__pycache__/tix.cpython-38.pyc,,
+future/moves/tkinter/__pycache__/ttk.cpython-38.pyc,,
+future/moves/tkinter/colorchooser.py,sha256=kprlmpRtvDbW5Gq43H1mi2KmNJ2kuzLQOba0a5EwDkU,333
+future/moves/tkinter/commondialog.py,sha256=mdUbq1IZqOGaSA7_8R367IukDCsMfzXiVHrTQQpp7Z0,333
+future/moves/tkinter/constants.py,sha256=0qRUrZLRPdVxueABL9KTzzEWEsk6xM1rOjxK6OHxXtA,324
+future/moves/tkinter/dialog.py,sha256=ksp-zvs-_A90P9RNHS8S27f1k8f48zG2Bel2jwZV5y0,311
+future/moves/tkinter/dnd.py,sha256=C_Ah0Urnyf2XKE5u-oP6mWi16RzMSXgMA1uhBSAwKY8,306
+future/moves/tkinter/filedialog.py,sha256=RSJFDGOP2AJ4T0ZscJ2hyF9ssOWp9t_S_DtnOmT-WZ8,323
+future/moves/tkinter/font.py,sha256=TXarflhJRxqepaRNSDw6JFUVGz5P1T1C4_uF9VRqj3w,309
+future/moves/tkinter/messagebox.py,sha256=WJt4t83kLmr_UnpCWFuLoyazZr3wAUOEl6ADn3osoEA,327
+future/moves/tkinter/scrolledtext.py,sha256=DRzN8aBAlDBUo1B2KDHzdpRSzXBfH4rOOz0iuHXbQcg,329
+future/moves/tkinter/simpledialog.py,sha256=6MhuVhZCJV4XfPpPSUWKlDLLGEi0Y2ZlGQ9TbsmJFL0,329
+future/moves/tkinter/tix.py,sha256=aNeOfbWSGmcN69UmEGf4tJ-QIxLT6SU5ynzm1iWgepA,302
+future/moves/tkinter/ttk.py,sha256=rRrJpDjcP2gjQNukECu4F026P-CkW-3Ca2tN6Oia-Fw,302
+future/moves/urllib/__init__.py,sha256=yB9F-fDQpzu1v8cBoKgIrL2ScUNqjlkqEztYrGVCQ-0,110
+future/moves/urllib/__pycache__/__init__.cpython-38.pyc,,
+future/moves/urllib/__pycache__/error.cpython-38.pyc,,
+future/moves/urllib/__pycache__/parse.cpython-38.pyc,,
+future/moves/urllib/__pycache__/request.cpython-38.pyc,,
+future/moves/urllib/__pycache__/response.cpython-38.pyc,,
+future/moves/urllib/__pycache__/robotparser.cpython-38.pyc,,
+future/moves/urllib/error.py,sha256=gfrKzv-6W5OjzNIfjvJaQkxABRLym2KwjfKFXSdDB60,479
+future/moves/urllib/parse.py,sha256=xLLUMIIB5MreCdYzRZ5zIRWrhTRCoMO8RZEH4WPFQDY,1045
+future/moves/urllib/request.py,sha256=ttIzq60PwjRyrLQUGdAtfYvs4fziVwvcLe2Kw-hvE0g,3496
+future/moves/urllib/response.py,sha256=ZEZML0FpbB--GIeBFPvSzbtlVJ6EsR4tCI4qB7D8sFQ,342
+future/moves/urllib/robotparser.py,sha256=j24p6dMNzUpGZtT3BQxwRoE-F88iWmBpKgu0tRV61FQ,179
+future/moves/winreg.py,sha256=2zNAG59QI7vFlCj7kqDh0JrAYTpexOnI55PEAIjYhqo,163
+future/moves/xmlrpc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+future/moves/xmlrpc/__pycache__/__init__.cpython-38.pyc,,
+future/moves/xmlrpc/__pycache__/client.cpython-38.pyc,,
+future/moves/xmlrpc/__pycache__/server.cpython-38.pyc,,
+future/moves/xmlrpc/client.py,sha256=2PfnL5IbKVwdKP7C8B1OUviEtuBObwoH4pAPfvHIvQc,143
+future/moves/xmlrpc/server.py,sha256=ESDXdpUgTKyeFmCDSnJmBp8zONjJklsRJOvy4OtaALc,143
+future/standard_library/__init__.py,sha256=7paz9IsD5qv_tvk5Rre3YrlA2_2aS1FJfI7UlrzAtWY,27743
+future/standard_library/__pycache__/__init__.cpython-38.pyc,,
+future/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+future/tests/__pycache__/__init__.cpython-38.pyc,,
+future/tests/__pycache__/base.cpython-38.pyc,,
+future/tests/base.py,sha256=7LTAKHJgUxOwmffD1kgcErVt2VouKcldPnq4iruqk_k,19956
+future/types/__init__.py,sha256=5fBxWqf_OTQ8jZ7k2TS34rFH14togeR488F4zBHIQ-s,6831
+future/types/__pycache__/__init__.cpython-38.pyc,,
+future/types/__pycache__/newbytes.cpython-38.pyc,,
+future/types/__pycache__/newdict.cpython-38.pyc,,
+future/types/__pycache__/newint.cpython-38.pyc,,
+future/types/__pycache__/newlist.cpython-38.pyc,,
+future/types/__pycache__/newmemoryview.cpython-38.pyc,,
+future/types/__pycache__/newobject.cpython-38.pyc,,
+future/types/__pycache__/newopen.cpython-38.pyc,,
+future/types/__pycache__/newrange.cpython-38.pyc,,
+future/types/__pycache__/newstr.cpython-38.pyc,,
+future/types/newbytes.py,sha256=D_kNDD9sbNJir2cUxxePiAuw2OW5irxVnu55uHmuK9E,16303
+future/types/newdict.py,sha256=2N7P44cWmWtiDHvlK5ir15mW492gg6uP2n65d5bsDy4,3100
+future/types/newint.py,sha256=hJiv9qUDrjl1xkfzNFNLzafsRMPoFcRFceoivUzVIek,13286
+future/types/newlist.py,sha256=-H5-fXodd-UQgTFnZBJdwE68CrgIL_jViYdv4w7q2rU,2284
+future/types/newmemoryview.py,sha256=LnARgiKqQ2zLwwDZ3owu1atoonPQkOneWMfxJCwB4_o,712
+future/types/newobject.py,sha256=AX_n8GwlDR2IY-xIwZCvu0Olj_Ca2aS57nlTihnFr-I,3358
+future/types/newopen.py,sha256=lcRNHWZ1UjEn_0_XKis1ZA5U6l-4c-CHlC0WX1sY4NI,810
+future/types/newrange.py,sha256=7sgJaRaC4WIUtZ40K-c1d5QWruyaCWGgTVFadKo8qYA,5294
+future/types/newstr.py,sha256=e0brkurI0IK--4ToQEO4Cz1FECZav4CyUGMKxlrcmK4,15758
+future/utils/__init__.py,sha256=wsvXsKx-DXZichQ10Rdml-CWMqS79RNNynmdvfISpCU,21828
+future/utils/__pycache__/__init__.cpython-38.pyc,,
+future/utils/__pycache__/surrogateescape.cpython-38.pyc,,
+future/utils/surrogateescape.py,sha256=7u4V4XlW83P5YSAJS2f92YUF8vsWthsiTnmAshOJL_M,6097
+libfuturize/__init__.py,sha256=CZA_KgvTQOPAY1_MrlJeQ6eMh2Eei4_KIv4JuyAkpfw,31
+libfuturize/__pycache__/__init__.cpython-38.pyc,,
+libfuturize/__pycache__/fixer_util.cpython-38.pyc,,
+libfuturize/__pycache__/main.cpython-38.pyc,,
+libfuturize/fixer_util.py,sha256=Zhms5G97l40pyG1krQM2lCp-TxnocBdJkB2AbkAFnKY,17494
+libfuturize/fixes/__init__.py,sha256=5KEpUnjVsFCCsr_-zrikvJbLf9zslEJnFTH_5pBc33I,5236
+libfuturize/fixes/__pycache__/__init__.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_UserDict.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_absolute_import.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_add__future__imports_except_unicode_literals.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_basestring.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_bytes.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_cmp.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_division.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_division_safe.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_execfile.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_future_builtins.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_future_standard_library.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_future_standard_library_urllib.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_input.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_metaclass.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_next_call.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_object.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_oldstr_wrap.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_order___future__imports.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_print.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_print_with_import.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_raise.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_remove_old__future__imports.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_unicode_keep_u.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_unicode_literals_import.cpython-38.pyc,,
+libfuturize/fixes/__pycache__/fix_xrange_with_import.cpython-38.pyc,,
+libfuturize/fixes/fix_UserDict.py,sha256=jL4jXnGaUQTkG8RKfGXbU_HVTkB3MWZMQwUkqMAjB6I,3840
+libfuturize/fixes/fix_absolute_import.py,sha256=vkrF2FyQR5lSz2WmdqywzkEJVTC0eq4gh_REWBKHh7w,3140
+libfuturize/fixes/fix_add__future__imports_except_unicode_literals.py,sha256=Fr219VAzR8KWXc2_bfiqLl10EgxAWjL6cI3Mowt--VU,662
+libfuturize/fixes/fix_basestring.py,sha256=bHkKuMzhr5FMXwjXlMOjsod4S3rQkVdbzhoWV4-tl3Y,394
+libfuturize/fixes/fix_bytes.py,sha256=AhzOJes6EnPwgzboDjvURANbWKqciG6ZGaYW07PYQK8,685
+libfuturize/fixes/fix_cmp.py,sha256=Blq_Z0IGkYiKS83QzZ5wUgpJyZfQiZoEsWJ1VPyXgFY,701
+libfuturize/fixes/fix_division.py,sha256=gnrAi7stquiVUyi_De1H8q--43iQaSUX0CjnOmQ6O2w,228
+libfuturize/fixes/fix_division_safe.py,sha256=Y_HUfQJAxRClXkcfqWP5SFCsRYZOsLUsNjLXlGOA3cQ,3292
+libfuturize/fixes/fix_execfile.py,sha256=I5AcJ6vPZ7i70TChaq9inxqnZ4C04-yJyfAItGa8E3c,921
+libfuturize/fixes/fix_future_builtins.py,sha256=QBCRpD9XA7tbtfP4wmOF2DXquB4lq-eupkQj-QAxp0s,2027
+libfuturize/fixes/fix_future_standard_library.py,sha256=FVtflFt38efHe_SEX6k3m6IYAtKWjA4rAPZrlCv6yA0,733
+libfuturize/fixes/fix_future_standard_library_urllib.py,sha256=Rf81XcAXA-vwNvrhskf5sLExbR--Wkr5fiUcMYGAKzs,1001
+libfuturize/fixes/fix_input.py,sha256=bhaPNtMrZNbjWIDQCR7Iue5BxBj4rf0RJQ9_jiwvb-s,687
+libfuturize/fixes/fix_metaclass.py,sha256=GLB76wbuyUVciDgW9bgNNOBEnLeS_AR-fKABcPBZk6M,9568
+libfuturize/fixes/fix_next_call.py,sha256=01STG86Av9o5QcpQDJ6UbPhvxt9kKrkatiPeddXRgvA,3158
+libfuturize/fixes/fix_object.py,sha256=qalFIjn0VTWXG5sGOOoCvO65omjX5_9d40SUpwUjBdw,407
+libfuturize/fixes/fix_oldstr_wrap.py,sha256=UCR6Q2l-pVqJSrRTnQAWMlaqBoX7oX1VpG_w6Q0XcyY,1214
+libfuturize/fixes/fix_order___future__imports.py,sha256=ACUCw5NEGWvj6XA9rNj8BYha3ktxLvkM5Ssh5cyV644,829
+libfuturize/fixes/fix_print.py,sha256=92s1w2t9SynA3Y1_85-lexSBbgEWJM6lBrhCxVacfDc,3384
+libfuturize/fixes/fix_print_with_import.py,sha256=hVWn70Q1DPMUiHMyEqgUx-6sM1AylLj78v9pMc4LFw8,735
+libfuturize/fixes/fix_raise.py,sha256=mEXpM9sS6tenMmxayfqM-Kp9gUvaztTY61vFaqyMUuo,3884
+libfuturize/fixes/fix_remove_old__future__imports.py,sha256=j4EC1KEVgXhuQAqhYHnAruUjW6uczPjV_fTCSOLMuAw,851
+libfuturize/fixes/fix_unicode_keep_u.py,sha256=M8fcFxHeFnWVOKoQRpkMsnpd9qmUFubI2oFhO4ZPk7A,779
+libfuturize/fixes/fix_unicode_literals_import.py,sha256=wq-hb-9Yx3Az4ol-ylXZJPEDZ81EaPZeIy5VvpA0CEY,367
+libfuturize/fixes/fix_xrange_with_import.py,sha256=f074qStjMz3OtLjt1bKKZSxQnRbbb7HzEbqHt9wgqdw,479
+libfuturize/main.py,sha256=feICmcv0dzWhutvwz0unnIVxusbSlQZFDaxObkHebs8,13733
+libpasteurize/__init__.py,sha256=CZA_KgvTQOPAY1_MrlJeQ6eMh2Eei4_KIv4JuyAkpfw,31
+libpasteurize/__pycache__/__init__.cpython-38.pyc,,
+libpasteurize/__pycache__/main.cpython-38.pyc,,
+libpasteurize/fixes/__init__.py,sha256=ccdv-2MGjQMbq8XuEZBndHmbzGRrZnabksjXZLUv044,3719
+libpasteurize/fixes/__pycache__/__init__.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/feature_base.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_add_all__future__imports.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_add_all_future_builtins.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_add_future_standard_library_import.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_annotations.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_division.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_features.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_fullargspec.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_future_builtins.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_getcwd.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_imports.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_imports2.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_kwargs.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_memoryview.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_metaclass.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_newstyle.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_next.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_printfunction.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_raise.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_raise_.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_throw.cpython-38.pyc,,
+libpasteurize/fixes/__pycache__/fix_unpacking.cpython-38.pyc,,
+libpasteurize/fixes/feature_base.py,sha256=v7yLjBDBUPeNUc-YHGGlIsJDOQzFAM4Vo0RN5F1JHVU,1723
+libpasteurize/fixes/fix_add_all__future__imports.py,sha256=mHet1LgbHn9GfgCYGNZXKo-rseDWreAvUcAjZwdgeTE,676
+libpasteurize/fixes/fix_add_all_future_builtins.py,sha256=scfkY-Sz5j0yDtLYls2ENOcqEMPVxeDm9gFYYPINPB8,1269
+libpasteurize/fixes/fix_add_future_standard_library_import.py,sha256=thTRbkBzy_SJjZ0bJteTp0sBTx8Wr69xFakH4styf7Y,663
+libpasteurize/fixes/fix_annotations.py,sha256=VT_AorKY9AYWYZUZ17_CeUrJlEA7VGkwVLDQlwD1Bxo,1581
+libpasteurize/fixes/fix_division.py,sha256=_TD_c5KniAYqEm11O7NJF0v2WEhYSNkRGcKG_94ZOas,904
+libpasteurize/fixes/fix_features.py,sha256=NZn0n34_MYZpLNwyP1Tf51hOiN58Rg7A8tA9pK1S8-c,2675
+libpasteurize/fixes/fix_fullargspec.py,sha256=VlZuIU6QNrClmRuvC4mtLICL3yMCi-RcGCnS9fD4b-Q,438
+libpasteurize/fixes/fix_future_builtins.py,sha256=SlCK9I9u05m19Lr1wxlJxF8toZ5yu0yXBeDLxUN9_fw,1450
+libpasteurize/fixes/fix_getcwd.py,sha256=uebvTvFboLqsROFCwdnzoP6ThziM0skz9TDXHoJcFsQ,873
+libpasteurize/fixes/fix_imports.py,sha256=U4lIs_5Xp1qqM8mN72ieDkkIdiyALZFyCZsRC8ZmXlM,4944
+libpasteurize/fixes/fix_imports2.py,sha256=bs2V5Yv0v_8xLx-lNj9kNEAK2dLYXUXkZ2hxECg01CU,8580
+libpasteurize/fixes/fix_kwargs.py,sha256=NB_Ap8YJk-9ncoJRbOiPY_VMIigFgVB8m8AuY29DDhE,5991
+libpasteurize/fixes/fix_memoryview.py,sha256=Fwayx_ezpr22tbJ0-QrKdJ-FZTpU-m7y78l1h_N4xxc,551
+libpasteurize/fixes/fix_metaclass.py,sha256=IcE2KjaDG8jUR3FYXECzOC_cr2pr5r95W1NTbMrK8Wc,3260
+libpasteurize/fixes/fix_newstyle.py,sha256=78sazKOHm9DUoMyW4VdvQpMXZhicbXzorVPRhBpSUrM,888
+libpasteurize/fixes/fix_next.py,sha256=VHqcyORRNVqKJ51jJ1OkhwxHuXRgp8qaldyqcMvA4J0,1233
+libpasteurize/fixes/fix_printfunction.py,sha256=NDIfqVmUJBG3H9E6nrnN0cWZK8ch9pL4F-nMexdsa38,401
+libpasteurize/fixes/fix_raise.py,sha256=zQ_AcMsGmCbtKMgrxZGcHLYNscw6tqXFvHQxgqtNbU8,1099
+libpasteurize/fixes/fix_raise_.py,sha256=9STp633frUfYASjYzqhwxx_MXePNmMhfJClowRj8FLY,1225
+libpasteurize/fixes/fix_throw.py,sha256=_ZREVre-WttUvk4sWjrqUNqm9Q1uFaATECN0_-PXKbk,835
+libpasteurize/fixes/fix_unpacking.py,sha256=eMqRe44Nfq8lo0YFL9oKW75dGARmBSmklj4BCS_q1Lo,5946
+libpasteurize/main.py,sha256=dVHYTQQeJonuOFDNrenJZl-rKHgOQKRMPP1OqnJogWQ,8186
+past/__init__.py,sha256=wIiXaAvXl3svDi-fzuy6HDD0VsuCVr4cnqnCr8XINGI,2918
+past/__pycache__/__init__.cpython-38.pyc,,
+past/builtins/__init__.py,sha256=7j_4OsUlN6q2eKr14do7mRQ1GwXRoXAMUR0A1fJpAls,1805
+past/builtins/__pycache__/__init__.cpython-38.pyc,,
+past/builtins/__pycache__/misc.cpython-38.pyc,,
+past/builtins/__pycache__/noniterators.cpython-38.pyc,,
+past/builtins/misc.py,sha256=nw62HVSxuAgT-Q2lD3lmgRB9zmFXopS14dZHEv5xpDQ,2627
+past/builtins/noniterators.py,sha256=LtdELnd7KyYdXg7GkW25cgkEPUC0ggZ5AYMtDe9N95I,9370
+past/translation/__init__.py,sha256=j2e6mLeK74KEICqH6P_-tpKqSNZoMwip2toThhSmKpU,17646
+past/translation/__pycache__/__init__.cpython-38.pyc,,
+past/types/__init__.py,sha256=RyJlgqg9uJ8oF-kJT9QlfhfdmhiMh3fShmtvd2CQycY,879
+past/types/__pycache__/__init__.cpython-38.pyc,,
+past/types/__pycache__/basestring.cpython-38.pyc,,
+past/types/__pycache__/olddict.cpython-38.pyc,,
+past/types/__pycache__/oldstr.cpython-38.pyc,,
+past/types/basestring.py,sha256=qrImcr24wvdDCMvF9x0Tyx8S1lCt6GIwRvzuAmvg_Tg,728
+past/types/olddict.py,sha256=0YtffZ55VY6AyQ_rwu4DZ4vcRsp6dz-dQzczeyN8hLk,2721
+past/types/oldstr.py,sha256=J2sJPC5jWEdpqXPcFwJFNDKn51TKhi86PsLFmJtQr-M,4332
+past/utils/__init__.py,sha256=e8l1sOfdiDJ3dkckBWLNWvC1ahC5BX5haHC2TGdNgA8,2633
+past/utils/__pycache__/__init__.cpython-38.pyc,,

+ 5 - 0
Lib/site-packages/future-0.18.2.dist-info/WHEEL

@@ -0,0 +1,5 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.34.2)
+Root-Is-Purelib: true
+Tag: py3-none-any
+

+ 4 - 0
Lib/site-packages/future-0.18.2.dist-info/entry_points.txt

@@ -0,0 +1,4 @@
+[console_scripts]
+futurize = libfuturize.main:main
+pasteurize = libpasteurize.main:main
+

+ 4 - 0
Lib/site-packages/future-0.18.2.dist-info/top_level.txt

@@ -0,0 +1,4 @@
+future
+libfuturize
+libpasteurize
+past

+ 93 - 0
Lib/site-packages/future/__init__.py

@@ -0,0 +1,93 @@
+"""
+future: Easy, safe support for Python 2/3 compatibility
+=======================================================
+
+``future`` is the missing compatibility layer between Python 2 and Python
+3. It allows you to use a single, clean Python 3.x-compatible codebase to
+support both Python 2 and Python 3 with minimal overhead.
+
+It is designed to be used as follows::
+
+    from __future__ import (absolute_import, division,
+                            print_function, unicode_literals)
+    from builtins import (
+             bytes, dict, int, list, object, range, str,
+             ascii, chr, hex, input, next, oct, open,
+             pow, round, super,
+             filter, map, zip)
+
+followed by predominantly standard, idiomatic Python 3 code that then runs
+similarly on Python 2.6/2.7 and Python 3.3+.
+
+The imports have no effect on Python 3. On Python 2, they shadow the
+corresponding builtins, which normally have different semantics on Python 3
+versus 2, to provide their Python 3 semantics.
+
+
+Standard library reorganization
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+``future`` supports the standard library reorganization (PEP 3108) through the
+following Py3 interfaces:
+
+    >>> # Top-level packages with Py3 names provided on Py2:
+    >>> import html.parser
+    >>> import queue
+    >>> import tkinter.dialog
+    >>> import xmlrpc.client
+    >>> # etc.
+
+    >>> # Aliases provided for extensions to existing Py2 module names:
+    >>> from future.standard_library import install_aliases
+    >>> install_aliases()
+
+    >>> from collections import Counter, OrderedDict   # backported to Py2.6
+    >>> from collections import UserDict, UserList, UserString
+    >>> import urllib.request
+    >>> from itertools import filterfalse, zip_longest
+    >>> from subprocess import getoutput, getstatusoutput
+
+
+Automatic conversion
+--------------------
+
+An included script called `futurize
+<http://python-future.org/automatic_conversion.html>`_ aids in converting
+code (from either Python 2 or Python 3) to code compatible with both
+platforms. It is similar to ``python-modernize`` but goes further in
+providing Python 3 compatibility through the use of the backported types
+and builtin functions in ``future``.
+
+
+Documentation
+-------------
+
+See: http://python-future.org
+
+
+Credits
+-------
+
+:Author:  Ed Schofield, Jordan M. Adler, et al
+:Sponsor: Python Charmers Pty Ltd, Australia, and Python Charmers Pte
+          Ltd, Singapore. http://pythoncharmers.com
+:Others:  See docs/credits.rst or http://python-future.org/credits.html
+
+
+Licensing
+---------
+Copyright 2013-2019 Python Charmers Pty Ltd, Australia.
+The software is distributed under an MIT licence. See LICENSE.txt.
+
+"""
+
+__title__ = 'future'
+__author__ = 'Ed Schofield'
+__license__ = 'MIT'
+__copyright__ = 'Copyright 2013-2019 Python Charmers Pty Ltd'
+__ver_major__ = 0
+__ver_minor__ = 18
+__ver_patch__ = 2
+__ver_sub__ = ''
+__version__ = "%d.%d.%d%s" % (__ver_major__, __ver_minor__,
+                              __ver_patch__, __ver_sub__)

+ 26 - 0
Lib/site-packages/future/backports/__init__.py

@@ -0,0 +1,26 @@
+"""
+future.backports package
+"""
+
+from __future__ import absolute_import
+
+import sys
+
+__future_module__ = True
+from future.standard_library import import_top_level_modules
+
+
+if sys.version_info[0] >= 3:
+    import_top_level_modules()
+
+
+from .misc import (ceil,
+                   OrderedDict,
+                   Counter,
+                   ChainMap,
+                   check_output,
+                   count,
+                   recursive_repr,
+                   _count_elements,
+                   cmp_to_key
+                  )

+ 422 - 0
Lib/site-packages/future/backports/_markupbase.py

@@ -0,0 +1,422 @@
+"""Shared support for scanning document type declarations in HTML and XHTML.
+
+Backported for python-future from Python 3.3. Reason: ParserBase is an
+old-style class in the Python 2.7 source of markupbase.py, which I suspect
+might be the cause of sporadic unit-test failures on travis-ci.org with
+test_htmlparser.py.  The test failures look like this:
+
+    ======================================================================
+
+ERROR: test_attr_entity_replacement (future.tests.test_htmlparser.AttributesStrictTestCase)
+
+----------------------------------------------------------------------
+
+Traceback (most recent call last):
+  File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 661, in test_attr_entity_replacement
+    [("starttag", "a", [("b", "&><\"'")])])
+  File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 93, in _run_check
+    collector = self.get_collector()
+  File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 617, in get_collector
+    return EventCollector(strict=True)
+  File "/home/travis/build/edschofield/python-future/future/tests/test_htmlparser.py", line 27, in __init__
+    html.parser.HTMLParser.__init__(self, *args, **kw)
+  File "/home/travis/build/edschofield/python-future/future/backports/html/parser.py", line 135, in __init__
+    self.reset()
+  File "/home/travis/build/edschofield/python-future/future/backports/html/parser.py", line 143, in reset
+    _markupbase.ParserBase.reset(self)
+
+TypeError: unbound method reset() must be called with ParserBase instance as first argument (got EventCollector instance instead)
+
+This module is used as a foundation for the html.parser module.  It has no
+documented public API and should not be used directly.
+
+"""
+
+import re
+
+_declname_match = re.compile(r'[a-zA-Z][-_.a-zA-Z0-9]*\s*').match
+_declstringlit_match = re.compile(r'(\'[^\']*\'|"[^"]*")\s*').match
+_commentclose = re.compile(r'--\s*>')
+_markedsectionclose = re.compile(r']\s*]\s*>')
+
+# An analysis of the MS-Word extensions is available at
+# http://www.planetpublish.com/xmlarena/xap/Thursday/WordtoXML.pdf
+
+_msmarkedsectionclose = re.compile(r']\s*>')
+
+del re
+
+
+class ParserBase(object):
+    """Parser base class which provides some common support methods used
+    by the SGML/HTML and XHTML parsers."""
+
+    def __init__(self):
+        if self.__class__ is ParserBase:
+            raise RuntimeError(
+                "_markupbase.ParserBase must be subclassed")
+
+    def error(self, message):
+        raise NotImplementedError(
+            "subclasses of ParserBase must override error()")
+
+    def reset(self):
+        self.lineno = 1
+        self.offset = 0
+
+    def getpos(self):
+        """Return current line number and offset."""
+        return self.lineno, self.offset
+
+    # Internal -- update line number and offset.  This should be
+    # called for each piece of data exactly once, in order -- in other
+    # words the concatenation of all the input strings to this
+    # function should be exactly the entire input.
+    def updatepos(self, i, j):
+        if i >= j:
+            return j
+        rawdata = self.rawdata
+        nlines = rawdata.count("\n", i, j)
+        if nlines:
+            self.lineno = self.lineno + nlines
+            pos = rawdata.rindex("\n", i, j) # Should not fail
+            self.offset = j-(pos+1)
+        else:
+            self.offset = self.offset + j-i
+        return j
+
+    _decl_otherchars = ''
+
+    # Internal -- parse declaration (for use by subclasses).
+    def parse_declaration(self, i):
+        # This is some sort of declaration; in "HTML as
+        # deployed," this should only be the document type
+        # declaration ("<!DOCTYPE html...>").
+        # ISO 8879:1986, however, has more complex
+        # declaration syntax for elements in <!...>, including:
+        # --comment--
+        # [marked section]
+        # name in the following list: ENTITY, DOCTYPE, ELEMENT,
+        # ATTLIST, NOTATION, SHORTREF, USEMAP,
+        # LINKTYPE, LINK, IDLINK, USELINK, SYSTEM
+        rawdata = self.rawdata
+        j = i + 2
+        assert rawdata[i:j] == "<!", "unexpected call to parse_declaration"
+        if rawdata[j:j+1] == ">":
+            # the empty comment <!>
+            return j + 1
+        if rawdata[j:j+1] in ("-", ""):
+            # Start of comment followed by buffer boundary,
+            # or just a buffer boundary.
+            return -1
+        # A simple, practical version could look like: ((name|stringlit) S*) + '>'
+        n = len(rawdata)
+        if rawdata[j:j+2] == '--': #comment
+            # Locate --.*-- as the body of the comment
+            return self.parse_comment(i)
+        elif rawdata[j] == '[': #marked section
+            # Locate [statusWord [...arbitrary SGML...]] as the body of the marked section
+            # Where statusWord is one of TEMP, CDATA, IGNORE, INCLUDE, RCDATA
+            # Note that this is extended by Microsoft Office "Save as Web" function
+            # to include [if...] and [endif].
+            return self.parse_marked_section(i)
+        else: #all other declaration elements
+            decltype, j = self._scan_name(j, i)
+        if j < 0:
+            return j
+        if decltype == "doctype":
+            self._decl_otherchars = ''
+        while j < n:
+            c = rawdata[j]
+            if c == ">":
+                # end of declaration syntax
+                data = rawdata[i+2:j]
+                if decltype == "doctype":
+                    self.handle_decl(data)
+                else:
+                    # According to the HTML5 specs sections "8.2.4.44 Bogus
+                    # comment state" and "8.2.4.45 Markup declaration open
+                    # state", a comment token should be emitted.
+                    # Calling unknown_decl provides more flexibility though.
+                    self.unknown_decl(data)
+                return j + 1
+            if c in "\"'":
+                m = _declstringlit_match(rawdata, j)
+                if not m:
+                    return -1 # incomplete
+                j = m.end()
+            elif c in "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ":
+                name, j = self._scan_name(j, i)
+            elif c in self._decl_otherchars:
+                j = j + 1
+            elif c == "[":
+                # this could be handled in a separate doctype parser
+                if decltype == "doctype":
+                    j = self._parse_doctype_subset(j + 1, i)
+                elif decltype in set(["attlist", "linktype", "link", "element"]):
+                    # must tolerate []'d groups in a content model in an element declaration
+                    # also in data attribute specifications of attlist declaration
+                    # also link type declaration subsets in linktype declarations
+                    # also link attribute specification lists in link declarations
+                    self.error("unsupported '[' char in %s declaration" % decltype)
+                else:
+                    self.error("unexpected '[' char in declaration")
+            else:
+                self.error(
+                    "unexpected %r char in declaration" % rawdata[j])
+            if j < 0:
+                return j
+        return -1 # incomplete
+
+    # Internal -- parse a marked section
+    # Override this to handle MS-word extension syntax <![if word]>content<![endif]>
+    def parse_marked_section(self, i, report=1):
+        rawdata= self.rawdata
+        assert rawdata[i:i+3] == '<![', "unexpected call to parse_marked_section()"
+        sectName, j = self._scan_name( i+3, i )
+        if j < 0:
+            return j
+        if sectName in set(["temp", "cdata", "ignore", "include", "rcdata"]):
+            # look for standard ]]> ending
+            match= _markedsectionclose.search(rawdata, i+3)
+        elif sectName in set(["if", "else", "endif"]):
+            # look for MS Office ]> ending
+            match= _msmarkedsectionclose.search(rawdata, i+3)
+        else:
+            self.error('unknown status keyword %r in marked section' % rawdata[i+3:j])
+        if not match:
+            return -1
+        if report:
+            j = match.start(0)
+            self.unknown_decl(rawdata[i+3: j])
+        return match.end(0)
+
+    # Internal -- parse comment, return length or -1 if not terminated
+    def parse_comment(self, i, report=1):
+        rawdata = self.rawdata
+        if rawdata[i:i+4] != '<!--':
+            self.error('unexpected call to parse_comment()')
+        match = _commentclose.search(rawdata, i+4)
+        if not match:
+            return -1
+        if report:
+            j = match.start(0)
+            self.handle_comment(rawdata[i+4: j])
+        return match.end(0)
+
+    # Internal -- scan past the internal subset in a <!DOCTYPE declaration,
+    # returning the index just past any whitespace following the trailing ']'.
+    def _parse_doctype_subset(self, i, declstartpos):
+        rawdata = self.rawdata
+        n = len(rawdata)
+        j = i
+        while j < n:
+            c = rawdata[j]
+            if c == "<":
+                s = rawdata[j:j+2]
+                if s == "<":
+                    # end of buffer; incomplete
+                    return -1
+                if s != "<!":
+                    self.updatepos(declstartpos, j + 1)
+                    self.error("unexpected char in internal subset (in %r)" % s)
+                if (j + 2) == n:
+                    # end of buffer; incomplete
+                    return -1
+                if (j + 4) > n:
+                    # end of buffer; incomplete
+                    return -1
+                if rawdata[j:j+4] == "<!--":
+                    j = self.parse_comment(j, report=0)
+                    if j < 0:
+                        return j
+                    continue
+                name, j = self._scan_name(j + 2, declstartpos)
+                if j == -1:
+                    return -1
+                if name not in set(["attlist", "element", "entity", "notation"]):
+                    self.updatepos(declstartpos, j + 2)
+                    self.error(
+                        "unknown declaration %r in internal subset" % name)
+                # handle the individual names
+                meth = getattr(self, "_parse_doctype_" + name)
+                j = meth(j, declstartpos)
+                if j < 0:
+                    return j
+            elif c == "%":
+                # parameter entity reference
+                if (j + 1) == n:
+                    # end of buffer; incomplete
+                    return -1
+                s, j = self._scan_name(j + 1, declstartpos)
+                if j < 0:
+                    return j
+                if rawdata[j] == ";":
+                    j = j + 1
+            elif c == "]":
+                j = j + 1
+                while j < n and rawdata[j].isspace():
+                    j = j + 1
+                if j < n:
+                    if rawdata[j] == ">":
+                        return j
+                    self.updatepos(declstartpos, j)
+                    self.error("unexpected char after internal subset")
+                else:
+                    return -1
+            elif c.isspace():
+                j = j + 1
+            else:
+                self.updatepos(declstartpos, j)
+                self.error("unexpected char %r in internal subset" % c)
+        # end of buffer reached
+        return -1
+
+    # Internal -- scan past <!ELEMENT declarations
+    def _parse_doctype_element(self, i, declstartpos):
+        name, j = self._scan_name(i, declstartpos)
+        if j == -1:
+            return -1
+        # style content model; just skip until '>'
+        rawdata = self.rawdata
+        if '>' in rawdata[j:]:
+            return rawdata.find(">", j) + 1
+        return -1
+
+    # Internal -- scan past <!ATTLIST declarations
+    def _parse_doctype_attlist(self, i, declstartpos):
+        rawdata = self.rawdata
+        name, j = self._scan_name(i, declstartpos)
+        c = rawdata[j:j+1]
+        if c == "":
+            return -1
+        if c == ">":
+            return j + 1
+        while 1:
+            # scan a series of attribute descriptions; simplified:
+            #   name type [value] [#constraint]
+            name, j = self._scan_name(j, declstartpos)
+            if j < 0:
+                return j
+            c = rawdata[j:j+1]
+            if c == "":
+                return -1
+            if c == "(":
+                # an enumerated type; look for ')'
+                if ")" in rawdata[j:]:
+                    j = rawdata.find(")", j) + 1
+                else:
+                    return -1
+                while rawdata[j:j+1].isspace():
+                    j = j + 1
+                if not rawdata[j:]:
+                    # end of buffer, incomplete
+                    return -1
+            else:
+                name, j = self._scan_name(j, declstartpos)
+            c = rawdata[j:j+1]
+            if not c:
+                return -1
+            if c in "'\"":
+                m = _declstringlit_match(rawdata, j)
+                if m:
+                    j = m.end()
+                else:
+                    return -1
+                c = rawdata[j:j+1]
+                if not c:
+                    return -1
+            if c == "#":
+                if rawdata[j:] == "#":
+                    # end of buffer
+                    return -1
+                name, j = self._scan_name(j + 1, declstartpos)
+                if j < 0:
+                    return j
+                c = rawdata[j:j+1]
+                if not c:
+                    return -1
+            if c == '>':
+                # all done
+                return j + 1
+
+    # Internal -- scan past <!NOTATION declarations
+    def _parse_doctype_notation(self, i, declstartpos):
+        name, j = self._scan_name(i, declstartpos)
+        if j < 0:
+            return j
+        rawdata = self.rawdata
+        while 1:
+            c = rawdata[j:j+1]
+            if not c:
+                # end of buffer; incomplete
+                return -1
+            if c == '>':
+                return j + 1
+            if c in "'\"":
+                m = _declstringlit_match(rawdata, j)
+                if not m:
+                    return -1
+                j = m.end()
+            else:
+                name, j = self._scan_name(j, declstartpos)
+                if j < 0:
+                    return j
+
+    # Internal -- scan past <!ENTITY declarations
+    def _parse_doctype_entity(self, i, declstartpos):
+        rawdata = self.rawdata
+        if rawdata[i:i+1] == "%":
+            j = i + 1
+            while 1:
+                c = rawdata[j:j+1]
+                if not c:
+                    return -1
+                if c.isspace():
+                    j = j + 1
+                else:
+                    break
+        else:
+            j = i
+        name, j = self._scan_name(j, declstartpos)
+        if j < 0:
+            return j
+        while 1:
+            c = self.rawdata[j:j+1]
+            if not c:
+                return -1
+            if c in "'\"":
+                m = _declstringlit_match(rawdata, j)
+                if m:
+                    j = m.end()
+                else:
+                    return -1    # incomplete
+            elif c == ">":
+                return j + 1
+            else:
+                name, j = self._scan_name(j, declstartpos)
+                if j < 0:
+                    return j
+
+    # Internal -- scan a name token and the new position and the token, or
+    # return -1 if we've reached the end of the buffer.
+    def _scan_name(self, i, declstartpos):
+        rawdata = self.rawdata
+        n = len(rawdata)
+        if i == n:
+            return None, -1
+        m = _declname_match(rawdata, i)
+        if m:
+            s = m.group()
+            name = s.strip()
+            if (i + len(s)) == n:
+                return None, -1  # end of buffer
+            return name.lower(), m.end()
+        else:
+            self.updatepos(declstartpos, i)
+            self.error("expected name token at %r"
+                       % rawdata[declstartpos:declstartpos+20])
+
+    # To be overridden -- handlers for unknown objects
+    def unknown_decl(self, data):
+        pass

Файловите разлики са ограничени, защото са твърде много
+ 2152 - 0
Lib/site-packages/future/backports/datetime.py


+ 78 - 0
Lib/site-packages/future/backports/email/__init__.py

@@ -0,0 +1,78 @@
+# Copyright (C) 2001-2007 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""
+Backport of the Python 3.3 email package for Python-Future.
+
+A package for parsing, handling, and generating email messages.
+"""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+# Install the surrogate escape handler here because this is used by many
+# modules in the email package.
+from future.utils import surrogateescape
+surrogateescape.register_surrogateescape()
+# (Should this be done globally by ``future``?)
+
+
+__version__ = '5.1.0'
+
+__all__ = [
+    'base64mime',
+    'charset',
+    'encoders',
+    'errors',
+    'feedparser',
+    'generator',
+    'header',
+    'iterators',
+    'message',
+    'message_from_file',
+    'message_from_binary_file',
+    'message_from_string',
+    'message_from_bytes',
+    'mime',
+    'parser',
+    'quoprimime',
+    'utils',
+    ]
+
+
+
+# Some convenience routines.  Don't import Parser and Message as side-effects
+# of importing email since those cascadingly import most of the rest of the
+# email package.
+def message_from_string(s, *args, **kws):
+    """Parse a string into a Message object model.
+
+    Optional _class and strict are passed to the Parser constructor.
+    """
+    from future.backports.email.parser import Parser
+    return Parser(*args, **kws).parsestr(s)
+
+def message_from_bytes(s, *args, **kws):
+    """Parse a bytes string into a Message object model.
+
+    Optional _class and strict are passed to the Parser constructor.
+    """
+    from future.backports.email.parser import BytesParser
+    return BytesParser(*args, **kws).parsebytes(s)
+
+def message_from_file(fp, *args, **kws):
+    """Read a file and parse its contents into a Message object model.
+
+    Optional _class and strict are passed to the Parser constructor.
+    """
+    from future.backports.email.parser import Parser
+    return Parser(*args, **kws).parse(fp)
+
+def message_from_binary_file(fp, *args, **kws):
+    """Read a binary file and parse its contents into a Message object model.
+
+    Optional _class and strict are passed to the Parser constructor.
+    """
+    from future.backports.email.parser import BytesParser
+    return BytesParser(*args, **kws).parse(fp)

+ 232 - 0
Lib/site-packages/future/backports/email/_encoded_words.py

@@ -0,0 +1,232 @@
+""" Routines for manipulating RFC2047 encoded words.
+
+This is currently a package-private API, but will be considered for promotion
+to a public API if there is demand.
+
+"""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+from future.builtins import bytes
+from future.builtins import chr
+from future.builtins import int
+from future.builtins import str
+
+# An ecoded word looks like this:
+#
+#        =?charset[*lang]?cte?encoded_string?=
+#
+# for more information about charset see the charset module.  Here it is one
+# of the preferred MIME charset names (hopefully; you never know when parsing).
+# cte (Content Transfer Encoding) is either 'q' or 'b' (ignoring case).  In
+# theory other letters could be used for other encodings, but in practice this
+# (almost?) never happens.  There could be a public API for adding entries
+# to the CTE tables, but YAGNI for now.  'q' is Quoted Printable, 'b' is
+# Base64.  The meaning of encoded_string should be obvious.  'lang' is optional
+# as indicated by the brackets (they are not part of the syntax) but is almost
+# never encountered in practice.
+#
+# The general interface for a CTE decoder is that it takes the encoded_string
+# as its argument, and returns a tuple (cte_decoded_string, defects).  The
+# cte_decoded_string is the original binary that was encoded using the
+# specified cte.  'defects' is a list of MessageDefect instances indicating any
+# problems encountered during conversion.  'charset' and 'lang' are the
+# corresponding strings extracted from the EW, case preserved.
+#
+# The general interface for a CTE encoder is that it takes a binary sequence
+# as input and returns the cte_encoded_string, which is an ascii-only string.
+#
+# Each decoder must also supply a length function that takes the binary
+# sequence as its argument and returns the length of the resulting encoded
+# string.
+#
+# The main API functions for the module are decode, which calls the decoder
+# referenced by the cte specifier, and encode, which adds the appropriate
+# RFC 2047 "chrome" to the encoded string, and can optionally automatically
+# select the shortest possible encoding.  See their docstrings below for
+# details.
+
+import re
+import base64
+import binascii
+import functools
+from string import ascii_letters, digits
+from future.backports.email import errors
+
+__all__ = ['decode_q',
+           'encode_q',
+           'decode_b',
+           'encode_b',
+           'len_q',
+           'len_b',
+           'decode',
+           'encode',
+           ]
+
+#
+# Quoted Printable
+#
+
+# regex based decoder.
+_q_byte_subber = functools.partial(re.compile(br'=([a-fA-F0-9]{2})').sub,
+        lambda m: bytes([int(m.group(1), 16)]))
+
+def decode_q(encoded):
+    encoded = bytes(encoded.replace(b'_', b' '))
+    return _q_byte_subber(encoded), []
+
+
+# dict mapping bytes to their encoded form
+class _QByteMap(dict):
+
+    safe = bytes(b'-!*+/' + ascii_letters.encode('ascii') + digits.encode('ascii'))
+
+    def __missing__(self, key):
+        if key in self.safe:
+            self[key] = chr(key)
+        else:
+            self[key] = "={:02X}".format(key)
+        return self[key]
+
+_q_byte_map = _QByteMap()
+
+# In headers spaces are mapped to '_'.
+_q_byte_map[ord(' ')] = '_'
+
+def encode_q(bstring):
+    return str(''.join(_q_byte_map[x] for x in bytes(bstring)))
+
+def len_q(bstring):
+    return sum(len(_q_byte_map[x]) for x in bytes(bstring))
+
+
+#
+# Base64
+#
+
+def decode_b(encoded):
+    defects = []
+    pad_err = len(encoded) % 4
+    if pad_err:
+        defects.append(errors.InvalidBase64PaddingDefect())
+        padded_encoded = encoded + b'==='[:4-pad_err]
+    else:
+        padded_encoded = encoded
+    try:
+        # The validate kwarg to b64decode is not supported in Py2.x
+        if not re.match(b'^[A-Za-z0-9+/]*={0,2}$', padded_encoded):
+            raise binascii.Error('Non-base64 digit found')
+        return base64.b64decode(padded_encoded), defects
+    except binascii.Error:
+        # Since we had correct padding, this must an invalid char error.
+        defects = [errors.InvalidBase64CharactersDefect()]
+        # The non-alphabet characters are ignored as far as padding
+        # goes, but we don't know how many there are.  So we'll just
+        # try various padding lengths until something works.
+        for i in 0, 1, 2, 3:
+            try:
+                return base64.b64decode(encoded+b'='*i), defects
+            except (binascii.Error, TypeError):    # Py2 raises a TypeError
+                if i==0:
+                    defects.append(errors.InvalidBase64PaddingDefect())
+        else:
+            # This should never happen.
+            raise AssertionError("unexpected binascii.Error")
+
+def encode_b(bstring):
+    return base64.b64encode(bstring).decode('ascii')
+
+def len_b(bstring):
+    groups_of_3, leftover = divmod(len(bstring), 3)
+    # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
+    return groups_of_3 * 4 + (4 if leftover else 0)
+
+
+_cte_decoders = {
+    'q': decode_q,
+    'b': decode_b,
+    }
+
+def decode(ew):
+    """Decode encoded word and return (string, charset, lang, defects) tuple.
+
+    An RFC 2047/2243 encoded word has the form:
+
+        =?charset*lang?cte?encoded_string?=
+
+    where '*lang' may be omitted but the other parts may not be.
+
+    This function expects exactly such a string (that is, it does not check the
+    syntax and may raise errors if the string is not well formed), and returns
+    the encoded_string decoded first from its Content Transfer Encoding and
+    then from the resulting bytes into unicode using the specified charset.  If
+    the cte-decoded string does not successfully decode using the specified
+    character set, a defect is added to the defects list and the unknown octets
+    are replaced by the unicode 'unknown' character \uFDFF.
+
+    The specified charset and language are returned.  The default for language,
+    which is rarely if ever encountered, is the empty string.
+
+    """
+    _, charset, cte, cte_string, _ = str(ew).split('?')
+    charset, _, lang = charset.partition('*')
+    cte = cte.lower()
+    # Recover the original bytes and do CTE decoding.
+    bstring = cte_string.encode('ascii', 'surrogateescape')
+    bstring, defects = _cte_decoders[cte](bstring)
+    # Turn the CTE decoded bytes into unicode.
+    try:
+        string = bstring.decode(charset)
+    except UnicodeError:
+        defects.append(errors.UndecodableBytesDefect("Encoded word "
+            "contains bytes not decodable using {} charset".format(charset)))
+        string = bstring.decode(charset, 'surrogateescape')
+    except LookupError:
+        string = bstring.decode('ascii', 'surrogateescape')
+        if charset.lower() != 'unknown-8bit':
+            defects.append(errors.CharsetError("Unknown charset {} "
+                "in encoded word; decoded as unknown bytes".format(charset)))
+    return string, charset, lang, defects
+
+
+_cte_encoders = {
+    'q': encode_q,
+    'b': encode_b,
+    }
+
+_cte_encode_length = {
+    'q': len_q,
+    'b': len_b,
+    }
+
+def encode(string, charset='utf-8', encoding=None, lang=''):
+    """Encode string using the CTE encoding that produces the shorter result.
+
+    Produces an RFC 2047/2243 encoded word of the form:
+
+        =?charset*lang?cte?encoded_string?=
+
+    where '*lang' is omitted unless the 'lang' parameter is given a value.
+    Optional argument charset (defaults to utf-8) specifies the charset to use
+    to encode the string to binary before CTE encoding it.  Optional argument
+    'encoding' is the cte specifier for the encoding that should be used ('q'
+    or 'b'); if it is None (the default) the encoding which produces the
+    shortest encoded sequence is used, except that 'q' is preferred if it is up
+    to five characters longer.  Optional argument 'lang' (default '') gives the
+    RFC 2243 language string to specify in the encoded word.
+
+    """
+    string = str(string)
+    if charset == 'unknown-8bit':
+        bstring = string.encode('ascii', 'surrogateescape')
+    else:
+        bstring = string.encode(charset)
+    if encoding is None:
+        qlen = _cte_encode_length['q'](bstring)
+        blen = _cte_encode_length['b'](bstring)
+        # Bias toward q.  5 is arbitrary.
+        encoding = 'q' if qlen - blen < 5 else 'b'
+    encoded = _cte_encoders[encoding](bstring)
+    if lang:
+        lang = '*' + lang
+    return "=?{0}{1}?{2}?{3}?=".format(charset, lang, encoding, encoded)

Файловите разлики са ограничени, защото са твърде много
+ 2965 - 0
Lib/site-packages/future/backports/email/_header_value_parser.py


+ 546 - 0
Lib/site-packages/future/backports/email/_parseaddr.py

@@ -0,0 +1,546 @@
+# Copyright (C) 2002-2007 Python Software Foundation
+# Contact: email-sig@python.org
+
+"""Email address parsing code.
+
+Lifted directly from rfc822.py.  This should eventually be rewritten.
+"""
+
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+from future.builtins import int
+
+__all__ = [
+    'mktime_tz',
+    'parsedate',
+    'parsedate_tz',
+    'quote',
+    ]
+
+import time, calendar
+
+# Joiner constants used throughout the address parser below.
+SPACE = ' '
+EMPTYSTRING = ''
+COMMASPACE = ', '
+
+# Parse a date field
+# Both abbreviated and full month names are accepted; _parsedate_tz takes
+# the matched index modulo 12 to map either form to month number 1-12.
+_monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun', 'jul',
+               'aug', 'sep', 'oct', 'nov', 'dec',
+               'january', 'february', 'march', 'april', 'may', 'june', 'july',
+               'august', 'september', 'october', 'november', 'december']
+
+_daynames = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun']
+
+# The timezone table does not include the military time zones defined
+# in RFC822, other than Z.  According to RFC1123, the description in
+# RFC822 gets the signs wrong, so we can't rely on any such time
+# zones.  RFC1123 recommends that numeric timezone indicators be used
+# instead of timezone names.
+
+# Offsets are expressed in signed +-hhmm form (e.g. -400 == -04:00);
+# _parsedate_tz converts them to seconds later.
+_timezones = {'UT':0, 'UTC':0, 'GMT':0, 'Z':0,
+              'AST': -400, 'ADT': -300,  # Atlantic (used in Canada)
+              'EST': -500, 'EDT': -400,  # Eastern
+              'CST': -600, 'CDT': -500,  # Central
+              'MST': -700, 'MDT': -600,  # Mountain
+              'PST': -800, 'PDT': -700   # Pacific
+              }
+
+
+def parsedate_tz(data):
+    """Convert a date string to a time tuple.
+
+    Accounts for military timezones.
+
+    Returns a 10-tuple (the 9 time.struct_time fields plus the UTC offset
+    in seconds), or None if the string could not be parsed.
+    """
+    res = _parsedate_tz(data)
+    if not res:
+        return
+    # _parsedate_tz uses None for a '-0000' zone (unknown source timezone);
+    # this public wrapper normalizes that to a 0 offset.
+    if res[9] is None:
+        res[9] = 0
+    return tuple(res)
+
+def _parsedate_tz(data):
+    """Convert date to extended time tuple.
+
+    The last (additional) element is the time zone offset in seconds, except if
+    the timezone was specified as -0000.  In that case the last element is
+    None.  This indicates a UTC timestamp that explicitly declaims knowledge of
+    the source timezone, as opposed to a +0000 timestamp that indicates the
+    source timezone really was UTC.
+
+    Returns a 10-element list, or None on unparseable input.
+    """
+    if not data:
+        return
+    data = data.split()
+    # The FWS after the comma after the day-of-week is optional, so search and
+    # adjust for this.
+    if data[0].endswith(',') or data[0].lower() in _daynames:
+        # There's a dayname here. Skip it
+        del data[0]
+    else:
+        i = data[0].rfind(',')
+        if i >= 0:
+            data[0] = data[0][i+1:]
+    if len(data) == 3: # RFC 850 date, deprecated
+        stuff = data[0].split('-')
+        if len(stuff) == 3:
+            data = stuff + data[1:]
+    if len(data) == 4:
+        # Timezone may be glued onto the time field ('12:00:00+0100');
+        # split it off so we always end up with 5 fields.
+        s = data[3]
+        i = s.find('+')
+        if i == -1:
+            i = s.find('-')
+        if i > 0:
+            data[3:] = [s[:i], s[i:]]
+        else:
+            data.append('') # Dummy tz
+    if len(data) < 5:
+        return None
+    data = data[:5]
+    [dd, mm, yy, tm, tz] = data
+    mm = mm.lower()
+    if mm not in _monthnames:
+        # Day and month may be swapped ('Jul 15' vs '15 Jul'); try the
+        # other order before giving up.
+        dd, mm = mm, dd.lower()
+        if mm not in _monthnames:
+            return None
+    mm = _monthnames.index(mm) + 1
+    # Full month names live at indices 12-23; fold them onto 1-12.
+    if mm > 12:
+        mm -= 12
+    if dd[-1] == ',':
+        dd = dd[:-1]
+    i = yy.find(':')
+    if i > 0:
+        # Year and time fields were swapped; put them back.
+        yy, tm = tm, yy
+    if yy[-1] == ',':
+        yy = yy[:-1]
+    if not yy[0].isdigit():
+        yy, tz = tz, yy
+    if tm[-1] == ',':
+        tm = tm[:-1]
+    tm = tm.split(':')
+    if len(tm) == 2:
+        [thh, tmm] = tm
+        tss = '0'
+    elif len(tm) == 3:
+        [thh, tmm, tss] = tm
+    elif len(tm) == 1 and '.' in tm[0]:
+        # Some non-compliant MUAs use '.' to separate time elements.
+        # NOTE(review): if this split yields a length other than 2 or 3,
+        # thh/tmm/tss are left unbound and the int() calls below raise
+        # NameError instead of returning None — confirm against upstream
+        # CPython, which later changed this branch to return None.
+        tm = tm[0].split('.')
+        if len(tm) == 2:
+            [thh, tmm] = tm
+            tss = 0
+        elif len(tm) == 3:
+            [thh, tmm, tss] = tm
+    else:
+        return None
+    try:
+        yy = int(yy)
+        dd = int(dd)
+        thh = int(thh)
+        tmm = int(tmm)
+        tss = int(tss)
+    except ValueError:
+        return None
+    # Check for a yy specified in two-digit format, then convert it to the
+    # appropriate four-digit format, according to the POSIX standard. RFC 822
+    # calls for a two-digit yy, but RFC 2822 (which obsoletes RFC 822)
+    # mandates a 4-digit yy. For more information, see the documentation for
+    # the time module.
+    if yy < 100:
+        # The year is between 1969 and 1999 (inclusive).
+        if yy > 68:
+            yy += 1900
+        # The year is between 2000 and 2068 (inclusive).
+        else:
+            yy += 2000
+    tzoffset = None
+    tz = tz.upper()
+    if tz in _timezones:
+        tzoffset = _timezones[tz]
+    else:
+        try:
+            tzoffset = int(tz)
+        except ValueError:
+            pass
+        # '-0000' declaims knowledge of the source timezone: report None
+        # rather than a real zero offset (see docstring).
+        if tzoffset==0 and tz.startswith('-'):
+            tzoffset = None
+    # Convert a timezone offset into seconds ; -0500 -> -18000
+    if tzoffset:
+        if tzoffset < 0:
+            tzsign = -1
+            tzoffset = -tzoffset
+        else:
+            tzsign = 1
+        tzoffset = tzsign * ( (tzoffset//100)*3600 + (tzoffset % 100)*60)
+    # Daylight Saving Time flag is set to -1, since DST is unknown.
+    return [yy, mm, dd, thh, tmm, tss, 0, 1, -1, tzoffset]
+
+
+def parsedate(data):
+    """Convert a time string to a 9-element time tuple (no timezone).
+
+    Returns None when parsedate_tz could not parse the input.
+    """
+    t = parsedate_tz(data)
+    if isinstance(t, tuple):
+        # Drop the trailing timezone-offset element.
+        return t[:9]
+    else:
+        return t
+
+
+def mktime_tz(data):
+    """Turn a 10-tuple as returned by parsedate_tz() into a POSIX timestamp."""
+    if data[9] is None:
+        # No zone info, so localtime is better assumption than GMT
+        return time.mktime(data[:8] + (-1,))
+    else:
+        # timegm() interprets the tuple as UTC; subtracting the offset
+        # shifts the declared local time back to the UTC epoch.
+        t = calendar.timegm(data)
+        return t - data[9]
+
+
+def quote(str):
+    """Prepare string to be used in a quoted string.
+
+    Turns backslash and double quote characters into quoted pairs.  These
+    are the only characters that need to be quoted inside a quoted string.
+    Does not add the surrounding double quotes.
+
+    Note: the parameter shadows the builtin 'str' (kept for upstream
+    compatibility).
+    """
+    return str.replace('\\', '\\\\').replace('"', '\\"')
+
+
+class AddrlistClass(object):
+    """Address parser class by Ben Escoto.
+
+    To understand what this class does, it helps to have a copy of RFC 2822 in
+    front of you.
+
+    Note: this class interface is deprecated and may be removed in the future.
+    Use email.utils.AddressList instead.
+    """
+
+    def __init__(self, field):
+        """Initialize a new instance.
+
+        `field' is an unparsed address header field, containing
+        one or more addresses.
+        """
+        self.specials = '()<>@,:;.\"[]'
+        self.pos = 0
+        self.LWS = ' \t'
+        self.CR = '\r\n'
+        self.FWS = self.LWS + self.CR
+        self.atomends = self.specials + self.LWS + self.CR
+        # Note that RFC 2822 now specifies `.' as obs-phrase, meaning that it
+        # is obsolete syntax.  RFC 2822 requires that we recognize obsolete
+        # syntax, so allow dots in phrases.
+        self.phraseends = self.atomends.replace('.', '')
+        self.field = field
+        self.commentlist = []
+
+    def gotonext(self):
+        """Skip white space and extract comments.
+
+        Returns the skipped whitespace (excluding CR/LF) as a string so
+        callers such as getaddrspec can preserve significant spacing.
+        """
+        wslist = []
+        while self.pos < len(self.field):
+            if self.field[self.pos] in self.LWS + '\n\r':
+                if self.field[self.pos] not in '\n\r':
+                    wslist.append(self.field[self.pos])
+                self.pos += 1
+            elif self.field[self.pos] == '(':
+                self.commentlist.append(self.getcomment())
+            else:
+                break
+        return EMPTYSTRING.join(wslist)
+
+    def getaddrlist(self):
+        """Parse all addresses.
+
+        Returns a list containing all of the addresses.
+        """
+        result = []
+        while self.pos < len(self.field):
+            ad = self.getaddress()
+            if ad:
+                result += ad
+            else:
+                # Unparseable fragment: record an empty (realname, email)
+                # placeholder rather than dropping it silently.
+                result.append(('', ''))
+        return result
+
+    def getaddress(self):
+        """Parse the next address.
+
+        Returns a list of (realname, email) tuples (a group address can
+        yield several), possibly empty.
+        """
+        self.commentlist = []
+        self.gotonext()
+
+        # Remember where we started so the addrspec branch below can rewind.
+        oldpos = self.pos
+        oldcl = self.commentlist
+        plist = self.getphraselist()
+
+        self.gotonext()
+        returnlist = []
+
+        if self.pos >= len(self.field):
+            # Bad email address technically, no domain.
+            if plist:
+                returnlist = [(SPACE.join(self.commentlist), plist[0])]
+
+        elif self.field[self.pos] in '.@':
+            # email address is just an addrspec
+            # this isn't very efficient since we start over
+            self.pos = oldpos
+            self.commentlist = oldcl
+            addrspec = self.getaddrspec()
+            returnlist = [(SPACE.join(self.commentlist), addrspec)]
+
+        elif self.field[self.pos] == ':':
+            # address is a group
+            returnlist = []
+
+            fieldlen = len(self.field)
+            self.pos += 1
+            while self.pos < len(self.field):
+                self.gotonext()
+                if self.pos < fieldlen and self.field[self.pos] == ';':
+                    self.pos += 1
+                    break
+                returnlist = returnlist + self.getaddress()
+
+        elif self.field[self.pos] == '<':
+            # Address is a phrase then a route addr
+            routeaddr = self.getrouteaddr()
+
+            if self.commentlist:
+                returnlist = [(SPACE.join(plist) + ' (' +
+                               ' '.join(self.commentlist) + ')', routeaddr)]
+            else:
+                returnlist = [(SPACE.join(plist), routeaddr)]
+
+        else:
+            if plist:
+                returnlist = [(SPACE.join(self.commentlist), plist[0])]
+            elif self.field[self.pos] in self.specials:
+                # Skip a stray special character so parsing can make progress.
+                self.pos += 1
+
+        self.gotonext()
+        if self.pos < len(self.field) and self.field[self.pos] == ',':
+            self.pos += 1
+        return returnlist
+
+    def getrouteaddr(self):
+        """Parse a route address (Return-path value).
+
+        This method just skips all the route stuff and returns the addrspec.
+        """
+        if self.field[self.pos] != '<':
+            return
+
+        expectroute = False
+        self.pos += 1
+        self.gotonext()
+        adlist = ''
+        while self.pos < len(self.field):
+            if expectroute:
+                # '@' seen: consume (and discard) the route domain.
+                self.getdomain()
+                expectroute = False
+            elif self.field[self.pos] == '>':
+                self.pos += 1
+                break
+            elif self.field[self.pos] == '@':
+                self.pos += 1
+                expectroute = True
+            elif self.field[self.pos] == ':':
+                self.pos += 1
+            else:
+                adlist = self.getaddrspec()
+                self.pos += 1
+                break
+            self.gotonext()
+
+        return adlist
+
+    def getaddrspec(self):
+        """Parse an RFC 2822 addr-spec."""
+        aslist = []
+
+        self.gotonext()
+        while self.pos < len(self.field):
+            preserve_ws = True
+            if self.field[self.pos] == '.':
+                # Whitespace around dots is not significant; drop any
+                # trailing blank fragment before appending the dot.
+                if aslist and not aslist[-1].strip():
+                    aslist.pop()
+                aslist.append('.')
+                self.pos += 1
+                preserve_ws = False
+            elif self.field[self.pos] == '"':
+                aslist.append('"%s"' % quote(self.getquote()))
+            elif self.field[self.pos] in self.atomends:
+                if aslist and not aslist[-1].strip():
+                    aslist.pop()
+                break
+            else:
+                aslist.append(self.getatom())
+            ws = self.gotonext()
+            if preserve_ws and ws:
+                aslist.append(ws)
+
+        if self.pos >= len(self.field) or self.field[self.pos] != '@':
+            # No domain part: return just the local part collected so far.
+            return EMPTYSTRING.join(aslist)
+
+        aslist.append('@')
+        self.pos += 1
+        self.gotonext()
+        return EMPTYSTRING.join(aslist) + self.getdomain()
+
+    def getdomain(self):
+        """Get the complete domain name from an address."""
+        sdlist = []
+        while self.pos < len(self.field):
+            if self.field[self.pos] in self.LWS:
+                self.pos += 1
+            elif self.field[self.pos] == '(':
+                self.commentlist.append(self.getcomment())
+            elif self.field[self.pos] == '[':
+                sdlist.append(self.getdomainliteral())
+            elif self.field[self.pos] == '.':
+                self.pos += 1
+                sdlist.append('.')
+            elif self.field[self.pos] in self.atomends:
+                break
+            else:
+                sdlist.append(self.getatom())
+        return EMPTYSTRING.join(sdlist)
+
+    def getdelimited(self, beginchar, endchars, allowcomments=True):
+        """Parse a header fragment delimited by special characters.
+
+        `beginchar' is the start character for the fragment.
+        If self is not looking at an instance of `beginchar' then
+        getdelimited returns the empty string.
+
+        `endchars' is a sequence of allowable end-delimiting characters.
+        Parsing stops when one of these is encountered.
+
+        If `allowcomments' is non-zero, embedded RFC 2822 comments are allowed
+        within the parsed fragment.
+        """
+        if self.field[self.pos] != beginchar:
+            return ''
+
+        slist = ['']
+        # 'quote' here tracks a pending backslash escape (quoted-pair),
+        # shadowing the module-level quote() within this method.
+        quote = False
+        self.pos += 1
+        while self.pos < len(self.field):
+            if quote:
+                slist.append(self.field[self.pos])
+                quote = False
+            elif self.field[self.pos] in endchars:
+                self.pos += 1
+                break
+            elif allowcomments and self.field[self.pos] == '(':
+                slist.append(self.getcomment())
+                continue        # have already advanced pos from getcomment
+            elif self.field[self.pos] == '\\':
+                quote = True
+            else:
+                slist.append(self.field[self.pos])
+            self.pos += 1
+
+        return EMPTYSTRING.join(slist)
+
+    def getquote(self):
+        """Get a quote-delimited fragment from self's field."""
+        return self.getdelimited('"', '"\r', False)
+
+    def getcomment(self):
+        """Get a parenthesis-delimited fragment from self's field."""
+        return self.getdelimited('(', ')\r', True)
+
+    def getdomainliteral(self):
+        """Parse an RFC 2822 domain-literal."""
+        return '[%s]' % self.getdelimited('[', ']\r', False)
+
+    def getatom(self, atomends=None):
+        """Parse an RFC 2822 atom.
+
+        Optional atomends specifies a different set of end token delimiters
+        (the default is to use self.atomends).  This is used e.g. in
+        getphraselist() since phrase endings must not include the `.' (which
+        is legal in phrases)."""
+        atomlist = ['']
+        if atomends is None:
+            atomends = self.atomends
+
+        while self.pos < len(self.field):
+            if self.field[self.pos] in atomends:
+                break
+            else:
+                atomlist.append(self.field[self.pos])
+            self.pos += 1
+
+        return EMPTYSTRING.join(atomlist)
+
+    def getphraselist(self):
+        """Parse a sequence of RFC 2822 phrases.
+
+        A phrase is a sequence of words, which are in turn either RFC 2822
+        atoms or quoted-strings.  Phrases are canonicalized by squeezing all
+        runs of continuous whitespace into one space.
+        """
+        plist = []
+
+        while self.pos < len(self.field):
+            if self.field[self.pos] in self.FWS:
+                self.pos += 1
+            elif self.field[self.pos] == '"':
+                plist.append(self.getquote())
+            elif self.field[self.pos] == '(':
+                self.commentlist.append(self.getcomment())
+            elif self.field[self.pos] in self.phraseends:
+                break
+            else:
+                plist.append(self.getatom(self.phraseends))
+
+        return plist
+
+class AddressList(AddrlistClass):
+    """An AddressList encapsulates a list of parsed RFC 2822 addresses.
+
+    The arithmetic operators implement set-like union/difference semantics
+    over (realname, email) tuples, but use a plain list underneath so the
+    original address order is preserved (membership tests are O(n)).
+    """
+    def __init__(self, field):
+        AddrlistClass.__init__(self, field)
+        if field:
+            self.addresslist = self.getaddrlist()
+        else:
+            self.addresslist = []
+
+    def __len__(self):
+        return len(self.addresslist)
+
+    def __add__(self, other):
+        # Set union
+        newaddr = AddressList(None)
+        newaddr.addresslist = self.addresslist[:]
+        for x in other.addresslist:
+            if not x in self.addresslist:
+                newaddr.addresslist.append(x)
+        return newaddr
+
+    def __iadd__(self, other):
+        # Set union, in-place
+        for x in other.addresslist:
+            if not x in self.addresslist:
+                self.addresslist.append(x)
+        return self
+
+    def __sub__(self, other):
+        # Set difference
+        newaddr = AddressList(None)
+        for x in self.addresslist:
+            if not x in other.addresslist:
+                newaddr.addresslist.append(x)
+        return newaddr
+
+    def __isub__(self, other):
+        # Set difference, in-place
+        for x in other.addresslist:
+            if x in self.addresslist:
+                self.addresslist.remove(x)
+        return self
+
+    def __getitem__(self, index):
+        # Make indexing, slices, and 'in' work
+        return self.addresslist[index]

+ 365 - 0
Lib/site-packages/future/backports/email/_policybase.py

@@ -0,0 +1,365 @@
+"""Policy framework for the email package.
+
+Allows fine grained feature control of how the package parses and emits data.
+"""
+from __future__ import unicode_literals
+from __future__ import print_function
+from __future__ import division
+from __future__ import absolute_import
+from future.builtins import super
+from future.builtins import str
+from future.utils import with_metaclass
+
+import abc
+from future.backports.email import header
+from future.backports.email import charset as _charset
+from future.backports.email.utils import _has_surrogates
+
+__all__ = [
+    'Policy',
+    'Compat32',
+    'compat32',
+    ]
+
+
+class _PolicyBase(object):
+
+    """Policy Object basic framework.
+
+    This class is useless unless subclassed.  A subclass should define
+    class attributes with defaults for any values that are to be
+    managed by the Policy object.  The constructor will then allow
+    non-default values to be set for these attributes at instance
+    creation time.  The instance will be callable, taking these same
+    attributes keyword arguments, and returning a new instance
+    identical to the called instance except for those values changed
+    by the keyword arguments.  Instances may be added, yielding new
+    instances with any non-default values from the right hand
+    operand overriding those in the left hand operand.  That is,
+
+        A + B == A(<non-default values of B>)
+
+    The repr of an instance can be used to reconstruct the object
+    if and only if the repr of the values can be used to reconstruct
+    those values.
+
+    """
+
+    def __init__(self, **kw):
+        """Create new Policy, possibly overriding some defaults.
+
+        See class docstring for a list of overridable attributes.
+
+        """
+        for name, value in kw.items():
+            if hasattr(self, name):
+                # Bypass our own __setattr__, which forbids mutation after
+                # construction (instances are effectively immutable).
+                super(_PolicyBase,self).__setattr__(name, value)
+            else:
+                raise TypeError(
+                    "{!r} is an invalid keyword argument for {}".format(
+                        name, self.__class__.__name__))
+
+    def __repr__(self):
+        args = [ "{}={!r}".format(name, value)
+                 for name, value in self.__dict__.items() ]
+        return "{}({})".format(self.__class__.__name__, ', '.join(args))
+
+    def clone(self, **kw):
+        """Return a new instance with specified attributes changed.
+
+        The new instance has the same attribute values as the current object,
+        except for the changes passed in as keyword arguments.
+
+        """
+        # __new__ avoids re-running __init__'s validation on copied attrs;
+        # object.__setattr__ sidesteps the read-only __setattr__ below.
+        newpolicy = self.__class__.__new__(self.__class__)
+        for attr, value in self.__dict__.items():
+            object.__setattr__(newpolicy, attr, value)
+        for attr, value in kw.items():
+            if not hasattr(self, attr):
+                raise TypeError(
+                    "{!r} is an invalid keyword argument for {}".format(
+                        attr, self.__class__.__name__))
+            object.__setattr__(newpolicy, attr, value)
+        return newpolicy
+
+    def __setattr__(self, name, value):
+        # Enforce immutability: any ordinary attribute assignment after
+        # construction raises AttributeError.
+        if hasattr(self, name):
+            msg = "{!r} object attribute {!r} is read-only"
+        else:
+            msg = "{!r} object has no attribute {!r}"
+        raise AttributeError(msg.format(self.__class__.__name__, name))
+
+    def __add__(self, other):
+        """Non-default values from right operand override those from left.
+
+        The object returned is a new instance of the subclass.
+
+        """
+        return self.clone(**other.__dict__)
+
+
+def _append_doc(doc, added_doc):
+    # Join a base docstring and an extension docstring: drop the base's
+    # final line and the extension's first line (the '+' marker line),
+    # then concatenate.  Used by _extend_docstrings below.
+    doc = doc.rsplit('\n', 1)[0]
+    added_doc = added_doc.split('\n', 1)[1]
+    return doc + '\n' + added_doc
+
+def _extend_docstrings(cls):
+    """Class decorator: a docstring starting with '+' on the class or any
+    of its methods is appended to the corresponding inherited docstring
+    (first matching one found in the bases' MROs)."""
+    if cls.__doc__ and cls.__doc__.startswith('+'):
+        cls.__doc__ = _append_doc(cls.__bases__[0].__doc__, cls.__doc__)
+    for name, attr in cls.__dict__.items():
+        if attr.__doc__ and attr.__doc__.startswith('+'):
+            for c in (c for base in cls.__bases__ for c in base.mro()):
+                doc = getattr(getattr(c, name), '__doc__')
+                if doc:
+                    attr.__doc__ = _append_doc(doc, attr.__doc__)
+                    break
+    return cls
+
+
+class Policy(with_metaclass(abc.ABCMeta, _PolicyBase)):
+
+    r"""Controls for how messages are interpreted and formatted.
+
+    Most of the classes and many of the methods in the email package accept
+    Policy objects as parameters.  A Policy object contains a set of values and
+    functions that control how input is interpreted and how output is rendered.
+    For example, the parameter 'raise_on_defect' controls whether or not an RFC
+    violation results in an error being raised or not, while 'max_line_length'
+    controls the maximum length of output lines when a Message is serialized.
+
+    Any valid attribute may be overridden when a Policy is created by passing
+    it as a keyword argument to the constructor.  Policy objects are immutable,
+    but a new Policy object can be created with only certain values changed by
+    calling the Policy instance with keyword arguments.  Policy objects can
+    also be added, producing a new Policy object in which the non-default
+    attributes set in the right hand operand overwrite those specified in the
+    left operand.
+
+    Settable attributes:
+
+    raise_on_defect     -- If true, then defects should be raised as errors.
+                           Default: False.
+
+    linesep             -- string containing the value to use as separation
+                           between output lines.  Default '\n'.
+
+    cte_type            -- Type of allowed content transfer encodings
+
+                           7bit  -- ASCII only
+                           8bit  -- Content-Transfer-Encoding: 8bit is allowed
+
+                           Default: 8bit.  Also controls the disposition of
+                           (RFC invalid) binary data in headers; see the
+                           documentation of the binary_fold method.
+
+    max_line_length     -- maximum length of lines, excluding 'linesep',
+                           during serialization.  None or 0 means no line
+                           wrapping is done.  Default is 78.
+
+    """
+
+    # Class-level defaults; _PolicyBase.__init__ lets a constructor keyword
+    # override any of them per-instance (see class docstring for semantics).
+    raise_on_defect = False
+    linesep = '\n'
+    cte_type = '8bit'
+    max_line_length = 78
+
+    def handle_defect(self, obj, defect):
+        """Based on policy, either raise defect or call register_defect.
+
+            handle_defect(obj, defect)
+
+        defect should be a Defect subclass, but in any case must be an
+        Exception subclass.  obj is the object on which the defect should be
+        registered if it is not raised.  If the raise_on_defect is True, the
+        defect is raised as an error, otherwise the object and the defect are
+        passed to register_defect.
+
+        This method is intended to be called by parsers that discover defects.
+        The email package parsers always call it with Defect instances.
+
+        """
+        if self.raise_on_defect:
+            raise defect
+        self.register_defect(obj, defect)
+
+    def register_defect(self, obj, defect):
+        """Record 'defect' on 'obj'.
+
+        Called by handle_defect if raise_on_defect is False.  This method is
+        part of the Policy API so that Policy subclasses can implement custom
+        defect handling.  The default implementation calls the append method of
+        the defects attribute of obj.  The objects used by the email package by
+        default that get passed to this method will always have a defects
+        attribute with an append method.
+
+        """
+        obj.defects.append(defect)
+
+    def header_max_count(self, name):
+        """Return the maximum allowed number of headers named 'name'.
+
+        Called when a header is added to a Message object.  If the returned
+        value is not 0 or None, and there are already a number of headers with
+        the name 'name' equal to the value returned, a ValueError is raised.
+
+        Because the default behavior of Message's __setitem__ is to append the
+        value to the list of headers, it is easy to create duplicate headers
+        without realizing it.  This method allows certain headers to be limited
+        in the number of instances of that header that may be added to a
+        Message programmatically.  (The limit is not observed by the parser,
+        which will faithfully produce as many headers as exist in the message
+        being parsed.)
+
+        The default implementation returns None for all header names.
+        """
+        return None
+
+    # The following abstract methods define the parse/serialize contract
+    # that concrete policies (e.g. Compat32 below) must implement.
+
+    @abc.abstractmethod
+    def header_source_parse(self, sourcelines):
+        """Given a list of linesep terminated strings constituting the lines of
+        a single header, return the (name, value) tuple that should be stored
+        in the model.  The input lines should retain their terminating linesep
+        characters.  The lines passed in by the email package may contain
+        surrogateescaped binary data.
+        """
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def header_store_parse(self, name, value):
+        """Given the header name and the value provided by the application
+        program, return the (name, value) that should be stored in the model.
+        """
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def header_fetch_parse(self, name, value):
+        """Given the header name and the value from the model, return the value
+        to be returned to the application program that is requesting that
+        header.  The value passed in by the email package may contain
+        surrogateescaped binary data if the lines were parsed by a BytesParser.
+        The returned value should not contain any surrogateescaped data.
+
+        """
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def fold(self, name, value):
+        """Given the header name and the value from the model, return a string
+        containing linesep characters that implement the folding of the header
+        according to the policy controls.  The value passed in by the email
+        package may contain surrogateescaped binary data if the lines were
+        parsed by a BytesParser.  The returned value should not contain any
+        surrogateescaped data.
+
+        """
+        raise NotImplementedError
+
+    @abc.abstractmethod
+    def fold_binary(self, name, value):
+        """Given the header name and the value from the model, return binary
+        data containing linesep characters that implement the folding of the
+        header according to the policy controls.  The value passed in by the
+        email package may contain surrogateescaped binary data.
+
+        """
+        raise NotImplementedError
+
+
+@_extend_docstrings
+class Compat32(Policy):
+
+    """+
+    This particular policy is the backward compatibility Policy.  It
+    replicates the behavior of the email package version 5.1.
+    """
+
+    def _sanitize_header(self, name, value):
+        # If the header value contains surrogates, return a Header using
+        # the unknown-8bit charset to encode the bytes as encoded words.
+        if not isinstance(value, str):
+            # Assume it is already a header object
+            return value
+        if _has_surrogates(value):
+            return header.Header(value, charset=_charset.UNKNOWN8BIT,
+                                 header_name=name)
+        else:
+            return value
+
+    def header_source_parse(self, sourcelines):
+        """+
+        The name is parsed as everything up to the ':' and returned unmodified.
+        The value is determined by stripping leading whitespace off the
+        remainder of the first line, joining all subsequent lines together, and
+        stripping any trailing carriage return or linefeed characters.
+
+        """
+        name, value = sourcelines[0].split(':', 1)
+        value = value.lstrip(' \t') + ''.join(sourcelines[1:])
+        return (name, value.rstrip('\r\n'))
+
+    def header_store_parse(self, name, value):
+        """+
+        The name and value are returned unmodified.
+        """
+        return (name, value)
+
+    def header_fetch_parse(self, name, value):
+        """+
+        If the value contains binary data, it is converted into a Header object
+        using the unknown-8bit charset.  Otherwise it is returned unmodified.
+        """
+        return self._sanitize_header(name, value)
+
+    def fold(self, name, value):
+        """+
+        Headers are folded using the Header folding algorithm, which preserves
+        existing line breaks in the value, and wraps each resulting line to the
+        max_line_length.  Non-ASCII binary data are CTE encoded using the
+        unknown-8bit charset.
+
+        """
+        return self._fold(name, value, sanitize=True)
+
+    def fold_binary(self, name, value):
+        """+
+        Headers are folded using the Header folding algorithm, which preserves
+        existing line breaks in the value, and wraps each resulting line to the
+        max_line_length.  If cte_type is 7bit, non-ascii binary data is CTE
+        encoded using the unknown-8bit charset.  Otherwise the original source
+        header is used, with its existing line breaks and/or binary data.
+
+        """
+        folded = self._fold(name, value, sanitize=self.cte_type=='7bit')
+        return folded.encode('ascii', 'surrogateescape')
+
+    def _fold(self, name, value, sanitize):
+        # Shared implementation for fold()/fold_binary(); 'sanitize'
+        # selects whether surrogate-bearing values are CTE encoded.
+        parts = []
+        parts.append('%s: ' % name)
+        if isinstance(value, str):
+            if _has_surrogates(value):
+                if sanitize:
+                    h = header.Header(value,
+                                      charset=_charset.UNKNOWN8BIT,
+                                      header_name=name)
+                else:
+                    # If we have raw 8bit data in a byte string, we have no idea
+                    # what the encoding is.  There is no safe way to split this
+                    # string.  If it's ascii-subset, then we could do a normal
+                    # ascii split, but if it's multibyte then we could break the
+                    # string.  There's no way to know so the least harm seems to
+                    # be to not split the string and risk it being too long.
+                    parts.append(value)
+                    h = None
+            else:
+                h = header.Header(value, header_name=name)
+        else:
+            # Assume it is a Header-like object.
+            h = value
+        if h is not None:
+            parts.append(h.encode(linesep=self.linesep,
+                                  maxlinelen=self.max_line_length))
+        parts.append(self.linesep)
+        return ''.join(parts)
+
+
+# Module-level singleton used as the default policy by the email package.
+compat32 = Compat32()

+ 120 - 0
Lib/site-packages/future/backports/email/base64mime.py

@@ -0,0 +1,120 @@
+# Copyright (C) 2002-2007 Python Software Foundation
+# Author: Ben Gertzfield
+# Contact: email-sig@python.org
+
+"""Base64 content transfer encoding per RFCs 2045-2047.
+
+This module handles the content transfer encoding method defined in RFC 2045
+to encode arbitrary 8-bit data using the three 8-bit bytes in four 7-bit
+characters encoding known as Base64.
+
+It is used in the MIME standards for email to attach images, audio, and text
+using some 8-bit character sets to messages.
+
+This module provides an interface to encode and decode both headers and bodies
+with Base64 encoding.
+
+RFC 2045 defines a method for including character set information in an
+`encoded-word' in a header.  This method is commonly used for 8-bit real names
+in To:, From:, Cc:, etc. fields, as well as Subject: lines.
+
+This module does not do the line wrapping or end-of-line character conversion
+necessary for proper internationalized headers; it only does dumb encoding and
+decoding.  To deal with the various line wrapping issues, use the email.header
+module.
+"""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+from future.builtins import range
+from future.builtins import bytes
+
+__all__ = [
+    'body_decode',
+    'body_encode',
+    'decode',
+    'decodestring',
+    'header_encode',
+    'header_length',
+    ]
+
+
+from base64 import b64encode
+from binascii import b2a_base64, a2b_base64
+
+CRLF = '\r\n'
+NL = '\n'
+EMPTYSTRING = ''
+
+# See also Charset.py
+MISC_LEN = 7
+
+
+# Helpers
+def header_length(bytearray):
+    """Return the length of s when it is encoded with base64."""
+    groups_of_3, leftover = divmod(len(bytearray), 3)
+    # 4 bytes out for each 3 bytes (or nonzero fraction thereof) in.
+    n = groups_of_3 * 4
+    if leftover:
+        n += 4
+    return n
+
+
+def header_encode(header_bytes, charset='iso-8859-1'):
+    """Encode a single header line with Base64 encoding in a given charset.
+
+    charset names the character set to use to encode the header.  It defaults
+    to iso-8859-1.  Base64 encoding is defined in RFC 2045.
+    """
+    if not header_bytes:
+        return ""
+    if isinstance(header_bytes, str):
+        header_bytes = header_bytes.encode(charset)
+    encoded = b64encode(header_bytes).decode("ascii")
+    return '=?%s?b?%s?=' % (charset, encoded)
+
+
+def body_encode(s, maxlinelen=76, eol=NL):
+    r"""Encode a string with base64.
+
+    Each line will be wrapped at, at most, maxlinelen characters (defaults to
+    76 characters).
+
+    Each line of encoded text will end with eol, which defaults to "\n".  Set
+    this to "\r\n" if you will be using the result of this function directly
+    in an email.
+    """
+    if not s:
+        return s
+
+    encvec = []
+    max_unencoded = maxlinelen * 3 // 4
+    for i in range(0, len(s), max_unencoded):
+        # BAW: should encode() inherit b2a_base64()'s dubious behavior in
+        # adding a newline to the encoded string?
+        enc = b2a_base64(s[i:i + max_unencoded]).decode("ascii")
+        if enc.endswith(NL) and eol != NL:
+            enc = enc[:-1] + eol
+        encvec.append(enc)
+    return EMPTYSTRING.join(encvec)
+
+
+def decode(string):
+    """Decode a raw base64 string, returning a bytes object.
+
+    This function does not parse a full MIME header value encoded with
+    base64 (like =?iso-8895-1?b?bmloISBuaWgh?=) -- please use the high
+    level email.header class for that functionality.
+    """
+    if not string:
+        return bytes()
+    elif isinstance(string, str):
+        return a2b_base64(string.encode('raw-unicode-escape'))
+    else:
+        return a2b_base64(string)
+
+
+# For convenience and backwards compatibility w/ standard base64 module
+body_decode = decode
+decodestring = decode

+ 409 - 0
Lib/site-packages/future/backports/email/charset.py

@@ -0,0 +1,409 @@
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+from future.builtins import str
+from future.builtins import next
+
+# Copyright (C) 2001-2007 Python Software Foundation
+# Author: Ben Gertzfield, Barry Warsaw
+# Contact: email-sig@python.org
+
+__all__ = [
+    'Charset',
+    'add_alias',
+    'add_charset',
+    'add_codec',
+    ]
+
+from functools import partial
+
+from future.backports import email
+from future.backports.email import errors
+from future.backports.email.encoders import encode_7or8bit
+
+
+# Flags for types of header encodings
+QP          = 1 # Quoted-Printable
+BASE64      = 2 # Base64
+SHORTEST    = 3 # the shorter of QP and base64, but only for headers
+
+# In "=?charset?q?hello_world?=", the =?, ?q?, and ?= add up to 7
+RFC2047_CHROME_LEN = 7
+
+DEFAULT_CHARSET = 'us-ascii'
+UNKNOWN8BIT = 'unknown-8bit'
+EMPTYSTRING = ''
+
+
+# Defaults
+CHARSETS = {
+    # input        header enc  body enc output conv
+    'iso-8859-1':  (QP,        QP,      None),
+    'iso-8859-2':  (QP,        QP,      None),
+    'iso-8859-3':  (QP,        QP,      None),
+    'iso-8859-4':  (QP,        QP,      None),
+    # iso-8859-5 is Cyrillic, and not especially used
+    # iso-8859-6 is Arabic, also not particularly used
+    # iso-8859-7 is Greek, QP will not make it readable
+    # iso-8859-8 is Hebrew, QP will not make it readable
+    'iso-8859-9':  (QP,        QP,      None),
+    'iso-8859-10': (QP,        QP,      None),
+    # iso-8859-11 is Thai, QP will not make it readable
+    'iso-8859-13': (QP,        QP,      None),
+    'iso-8859-14': (QP,        QP,      None),
+    'iso-8859-15': (QP,        QP,      None),
+    'iso-8859-16': (QP,        QP,      None),
+    'windows-1252':(QP,        QP,      None),
+    'viscii':      (QP,        QP,      None),
+    'us-ascii':    (None,      None,    None),
+    'big5':        (BASE64,    BASE64,  None),
+    'gb2312':      (BASE64,    BASE64,  None),
+    'euc-jp':      (BASE64,    None,    'iso-2022-jp'),
+    'shift_jis':   (BASE64,    None,    'iso-2022-jp'),
+    'iso-2022-jp': (BASE64,    None,    None),
+    'koi8-r':      (BASE64,    BASE64,  None),
+    'utf-8':       (SHORTEST,  BASE64, 'utf-8'),
+    }
+
+# Aliases for other commonly-used names for character sets.  Map
+# them to the real ones used in email.
+ALIASES = {
+    'latin_1': 'iso-8859-1',
+    'latin-1': 'iso-8859-1',
+    'latin_2': 'iso-8859-2',
+    'latin-2': 'iso-8859-2',
+    'latin_3': 'iso-8859-3',
+    'latin-3': 'iso-8859-3',
+    'latin_4': 'iso-8859-4',
+    'latin-4': 'iso-8859-4',
+    'latin_5': 'iso-8859-9',
+    'latin-5': 'iso-8859-9',
+    'latin_6': 'iso-8859-10',
+    'latin-6': 'iso-8859-10',
+    'latin_7': 'iso-8859-13',
+    'latin-7': 'iso-8859-13',
+    'latin_8': 'iso-8859-14',
+    'latin-8': 'iso-8859-14',
+    'latin_9': 'iso-8859-15',
+    'latin-9': 'iso-8859-15',
+    'latin_10':'iso-8859-16',
+    'latin-10':'iso-8859-16',
+    'cp949':   'ks_c_5601-1987',
+    'euc_jp':  'euc-jp',
+    'euc_kr':  'euc-kr',
+    'ascii':   'us-ascii',
+    }
+
+
+# Map charsets to their Unicode codec strings.
+CODEC_MAP = {
+    'gb2312':      'eucgb2312_cn',
+    'big5':        'big5_tw',
+    # Hack: We don't want *any* conversion for stuff marked us-ascii, as all
+    # sorts of garbage might be sent to us in the guise of 7-bit us-ascii.
+    # Let that stuff pass through without conversion to/from Unicode.
+    'us-ascii':    None,
+    }
+
+
+# Convenience functions for extending the above mappings
+def add_charset(charset, header_enc=None, body_enc=None, output_charset=None):
+    """Add character set properties to the global registry.
+
+    charset is the input character set, and must be the canonical name of a
+    character set.
+
+    Optional header_enc and body_enc is either Charset.QP for
+    quoted-printable, Charset.BASE64 for base64 encoding, Charset.SHORTEST for
+    the shortest of qp or base64 encoding, or None for no encoding.  SHORTEST
+    is only valid for header_enc.  It describes how message headers and
+    message bodies in the input charset are to be encoded.  Default is no
+    encoding.
+
+    Optional output_charset is the character set that the output should be
+    in.  Conversions will proceed from input charset, to Unicode, to the
+    output charset when the method Charset.convert() is called.  The default
+    is to output in the same character set as the input.
+
+    Both input_charset and output_charset must have Unicode codec entries in
+    the module's charset-to-codec mapping; use add_codec(charset, codecname)
+    to add codecs the module does not know about.  See the codecs module's
+    documentation for more information.
+    """
+    if body_enc == SHORTEST:
+        raise ValueError('SHORTEST not allowed for body_enc')
+    CHARSETS[charset] = (header_enc, body_enc, output_charset)
+
+
+def add_alias(alias, canonical):
+    """Add a character set alias.
+
+    alias is the alias name, e.g. latin-1
+    canonical is the character set's canonical name, e.g. iso-8859-1
+    """
+    ALIASES[alias] = canonical
+
+
+def add_codec(charset, codecname):
+    """Add a codec that map characters in the given charset to/from Unicode.
+
+    charset is the canonical name of a character set.  codecname is the name
+    of a Python codec, as appropriate for the second argument to the unicode()
+    built-in, or to the encode() method of a Unicode string.
+    """
+    CODEC_MAP[charset] = codecname
+
+
+# Convenience function for encoding strings, taking into account
+# that they might be unknown-8bit (ie: have surrogate-escaped bytes)
+def _encode(string, codec):
+    string = str(string)
+    if codec == UNKNOWN8BIT:
+        return string.encode('ascii', 'surrogateescape')
+    else:
+        return string.encode(codec)
+
+
+class Charset(object):
+    """Map character sets to their email properties.
+
+    This class provides information about the requirements imposed on email
+    for a specific character set.  It also provides convenience routines for
+    converting between character sets, given the availability of the
+    applicable codecs.  Given a character set, it will do its best to provide
+    information on how to use that character set in an email in an
+    RFC-compliant way.
+
+    Certain character sets must be encoded with quoted-printable or base64
+    when used in email headers or bodies.  Certain character sets must be
+    converted outright, and are not allowed in email.  Instances of this
+    module expose the following information about a character set:
+
+    input_charset: The initial character set specified.  Common aliases
+                   are converted to their `official' email names (e.g. latin_1
+                   is converted to iso-8859-1).  Defaults to 7-bit us-ascii.
+
+    header_encoding: If the character set must be encoded before it can be
+                     used in an email header, this attribute will be set to
+                     Charset.QP (for quoted-printable), Charset.BASE64 (for
+                     base64 encoding), or Charset.SHORTEST for the shortest of
+                     QP or BASE64 encoding.  Otherwise, it will be None.
+
+    body_encoding: Same as header_encoding, but describes the encoding for the
+                   mail message's body, which indeed may be different than the
+                   header encoding.  Charset.SHORTEST is not allowed for
+                   body_encoding.
+
+    output_charset: Some character sets must be converted before they can be
+                    used in email headers or bodies.  If the input_charset is
+                    one of them, this attribute will contain the name of the
+                    charset output will be converted to.  Otherwise, it will
+                    be None.
+
+    input_codec: The name of the Python codec used to convert the
+                 input_charset to Unicode.  If no conversion codec is
+                 necessary, this attribute will be None.
+
+    output_codec: The name of the Python codec used to convert Unicode
+                  to the output_charset.  If no conversion codec is necessary,
+                  this attribute will have the same value as the input_codec.
+    """
+    def __init__(self, input_charset=DEFAULT_CHARSET):
+        # RFC 2046, $4.1.2 says charsets are not case sensitive.  We coerce to
+        # unicode because its .lower() is locale insensitive.  If the argument
+        # is already a unicode, we leave it at that, but ensure that the
+        # charset is ASCII, as the standard (RFC XXX) requires.
+        try:
+            if isinstance(input_charset, str):
+                input_charset.encode('ascii')
+            else:
+                input_charset = str(input_charset, 'ascii')
+        except UnicodeError:
+            raise errors.CharsetError(input_charset)
+        input_charset = input_charset.lower()
+        # Set the input charset after filtering through the aliases
+        self.input_charset = ALIASES.get(input_charset, input_charset)
+        # We can try to guess which encoding and conversion to use by the
+        # charset_map dictionary.  Try that first, but let the user override
+        # it.
+        henc, benc, conv = CHARSETS.get(self.input_charset,
+                                        (SHORTEST, BASE64, None))
+        if not conv:
+            conv = self.input_charset
+        # Set the attributes, allowing the arguments to override the default.
+        self.header_encoding = henc
+        self.body_encoding = benc
+        self.output_charset = ALIASES.get(conv, conv)
+        # Now set the codecs.  If one isn't defined for input_charset,
+        # guess and try a Unicode codec with the same name as input_codec.
+        self.input_codec = CODEC_MAP.get(self.input_charset,
+                                         self.input_charset)
+        self.output_codec = CODEC_MAP.get(self.output_charset,
+                                          self.output_charset)
+
+    def __str__(self):
+        return self.input_charset.lower()
+
+    __repr__ = __str__
+
+    def __eq__(self, other):
+        return str(self) == str(other).lower()
+
+    def __ne__(self, other):
+        return not self.__eq__(other)
+
+    def get_body_encoding(self):
+        """Return the content-transfer-encoding used for body encoding.
+
+        This is either the string `quoted-printable' or `base64' depending on
+        the encoding used, or it is a function in which case you should call
+        the function with a single argument, the Message object being
+        encoded.  The function should then set the Content-Transfer-Encoding
+        header itself to whatever is appropriate.
+
+        Returns "quoted-printable" if self.body_encoding is QP.
+        Returns "base64" if self.body_encoding is BASE64.
+        Returns conversion function otherwise.
+        """
+        assert self.body_encoding != SHORTEST
+        if self.body_encoding == QP:
+            return 'quoted-printable'
+        elif self.body_encoding == BASE64:
+            return 'base64'
+        else:
+            return encode_7or8bit
+
+    def get_output_charset(self):
+        """Return the output character set.
+
+        This is self.output_charset if that is not None, otherwise it is
+        self.input_charset.
+        """
+        return self.output_charset or self.input_charset
+
+    def header_encode(self, string):
+        """Header-encode a string by converting it first to bytes.
+
+        The type of encoding (base64 or quoted-printable) will be based on
+        this charset's `header_encoding`.
+
+        :param string: A unicode string for the header.  It must be possible
+            to encode this string to bytes using the character set's
+            output codec.
+        :return: The encoded string, with RFC 2047 chrome.
+        """
+        codec = self.output_codec or 'us-ascii'
+        header_bytes = _encode(string, codec)
+        # 7bit/8bit encodings return the string unchanged (modulo conversions)
+        encoder_module = self._get_encoder(header_bytes)
+        if encoder_module is None:
+            return string
+        return encoder_module.header_encode(header_bytes, codec)
+
+    def header_encode_lines(self, string, maxlengths):
+        """Header-encode a string by converting it first to bytes.
+
+        This is similar to `header_encode()` except that the string is fit
+        into maximum line lengths as given by the argument.
+
+        :param string: A unicode string for the header.  It must be possible
+            to encode this string to bytes using the character set's
+            output codec.
+        :param maxlengths: Maximum line length iterator.  Each element
+            returned from this iterator will provide the next maximum line
+            length.  This parameter is used as an argument to built-in next()
+            and should never be exhausted.  The maximum line lengths should
+            not count the RFC 2047 chrome.  These line lengths are only a
+            hint; the splitter does the best it can.
+        :return: Lines of encoded strings, each with RFC 2047 chrome.
+        """
+        # See which encoding we should use.
+        codec = self.output_codec or 'us-ascii'
+        header_bytes = _encode(string, codec)
+        encoder_module = self._get_encoder(header_bytes)
+        encoder = partial(encoder_module.header_encode, charset=codec)
+        # Calculate the number of characters that the RFC 2047 chrome will
+        # contribute to each line.
+        charset = self.get_output_charset()
+        extra = len(charset) + RFC2047_CHROME_LEN
+        # Now comes the hard part.  We must encode bytes but we can't split on
+        # bytes because some character sets are variable length and each
+        # encoded word must stand on its own.  So the problem is you have to
+        # encode to bytes to figure out this word's length, but you must split
+        # on characters.  This causes two problems: first, we don't know how
+        # many octets a specific substring of unicode characters will get
+        # encoded to, and second, we don't know how many ASCII characters
+        # those octets will get encoded to.  Unless we try it.  Which seems
+        # inefficient.  In the interest of being correct rather than fast (and
+        # in the hope that there will be few encoded headers in any such
+        # message), brute force it. :(
+        lines = []
+        current_line = []
+        maxlen = next(maxlengths) - extra
+        for character in string:
+            current_line.append(character)
+            this_line = EMPTYSTRING.join(current_line)
+            length = encoder_module.header_length(_encode(this_line, charset))
+            if length > maxlen:
+                # This last character doesn't fit so pop it off.
+                current_line.pop()
+                # Does nothing fit on the first line?
+                if not lines and not current_line:
+                    lines.append(None)
+                else:
+                    separator = (' ' if lines else '')
+                    joined_line = EMPTYSTRING.join(current_line)
+                    header_bytes = _encode(joined_line, codec)
+                    lines.append(encoder(header_bytes))
+                current_line = [character]
+                maxlen = next(maxlengths) - extra
+        joined_line = EMPTYSTRING.join(current_line)
+        header_bytes = _encode(joined_line, codec)
+        lines.append(encoder(header_bytes))
+        return lines
+
+    def _get_encoder(self, header_bytes):
+        if self.header_encoding == BASE64:
+            return email.base64mime
+        elif self.header_encoding == QP:
+            return email.quoprimime
+        elif self.header_encoding == SHORTEST:
+            len64 = email.base64mime.header_length(header_bytes)
+            lenqp = email.quoprimime.header_length(header_bytes)
+            if len64 < lenqp:
+                return email.base64mime
+            else:
+                return email.quoprimime
+        else:
+            return None
+
+    def body_encode(self, string):
+        """Body-encode a string by converting it first to bytes.
+
+        The type of encoding (base64 or quoted-printable) will be based on
+        self.body_encoding.  If body_encoding is None, we assume the
+        output charset is a 7bit encoding, so re-encoding the decoded
+        string using the ascii codec produces the correct string version
+        of the content.
+        """
+        if not string:
+            return string
+        if self.body_encoding is BASE64:
+            if isinstance(string, str):
+                string = string.encode(self.output_charset)
+            return email.base64mime.body_encode(string)
+        elif self.body_encoding is QP:
+            # quopromime.body_encode takes a string, but operates on it as if
+            # it were a list of byte codes.  For a (minimal) history on why
+            # this is so, see changeset 0cf700464177.  To correctly encode a
+            # character set, then, we must turn it into pseudo bytes via the
+            # latin1 charset, which will encode any byte as a single code point
+            # between 0 and 255, which is what body_encode is expecting.
+            if isinstance(string, str):
+                string = string.encode(self.output_charset)
+            string = string.decode('latin1')
+            return email.quoprimime.body_encode(string)
+        else:
+            if isinstance(string, str):
+                string = string.encode(self.output_charset).decode('ascii')
+            return string

+ 90 - 0
Lib/site-packages/future/backports/email/encoders.py

@@ -0,0 +1,90 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Encodings and related functions."""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+from future.builtins import str
+
+__all__ = [
+    'encode_7or8bit',
+    'encode_base64',
+    'encode_noop',
+    'encode_quopri',
+    ]
+
+
+try:
+    from base64 import encodebytes as _bencode
+except ImportError:
+    # Py2 compatibility. TODO: test this!
+    from base64 import encodestring as _bencode
+from quopri import encodestring as _encodestring
+
+
+def _qencode(s):
+    enc = _encodestring(s, quotetabs=True)
+    # Must encode spaces, which quopri.encodestring() doesn't do
+    return enc.replace(' ', '=20')
+
+
+def encode_base64(msg):
+    """Encode the message's payload in Base64.
+
+    Also, add an appropriate Content-Transfer-Encoding header.
+    """
+    orig = msg.get_payload()
+    encdata = str(_bencode(orig), 'ascii')
+    msg.set_payload(encdata)
+    msg['Content-Transfer-Encoding'] = 'base64'
+
+
+def encode_quopri(msg):
+    """Encode the message's payload in quoted-printable.
+
+    Also, add an appropriate Content-Transfer-Encoding header.
+    """
+    orig = msg.get_payload()
+    encdata = _qencode(orig)
+    msg.set_payload(encdata)
+    msg['Content-Transfer-Encoding'] = 'quoted-printable'
+
+
+def encode_7or8bit(msg):
+    """Set the Content-Transfer-Encoding header to 7bit or 8bit."""
+    orig = msg.get_payload()
+    if orig is None:
+        # There's no payload.  For backwards compatibility we use 7bit
+        msg['Content-Transfer-Encoding'] = '7bit'
+        return
+    # We play a trick to make this go fast.  If encoding/decode to ASCII
+    # succeeds, we know the data must be 7bit, otherwise treat it as 8bit.
+    try:
+        if isinstance(orig, str):
+            orig.encode('ascii')
+        else:
+            orig.decode('ascii')
+    except UnicodeError:
+        charset = msg.get_charset()
+        output_cset = charset and charset.output_charset
+        # iso-2022-* is non-ASCII but encodes to a 7-bit representation
+        if output_cset and output_cset.lower().startswith('iso-2022-'):
+            msg['Content-Transfer-Encoding'] = '7bit'
+        else:
+            msg['Content-Transfer-Encoding'] = '8bit'
+    else:
+        msg['Content-Transfer-Encoding'] = '7bit'
+    if not isinstance(orig, str):
+        msg.set_payload(orig.decode('ascii', 'surrogateescape'))
+
+
+def encode_noop(msg):
+    """Do nothing."""
+    # Well, not quite *nothing*: in Python3 we have to turn bytes into a string
+    # in our internal surrogateescaped form in order to keep the model
+    # consistent.
+    orig = msg.get_payload()
+    if not isinstance(orig, str):
+        msg.set_payload(orig.decode('ascii', 'surrogateescape'))

+ 111 - 0
Lib/site-packages/future/backports/email/errors.py

@@ -0,0 +1,111 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""email package exception classes."""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+from future.builtins import super
+
+
+class MessageError(Exception):
+    """Base class for errors in the email package."""
+
+
+class MessageParseError(MessageError):
+    """Base class for message parsing errors."""
+
+
+class HeaderParseError(MessageParseError):
+    """Error while parsing headers."""
+
+
+class BoundaryError(MessageParseError):
+    """Couldn't find terminating boundary."""
+
+
+class MultipartConversionError(MessageError, TypeError):
+    """Conversion to a multipart is prohibited."""
+
+
+class CharsetError(MessageError):
+    """An illegal charset was given."""
+
+
+# These are parsing defects which the parser was able to work around.
+class MessageDefect(ValueError):
+    """Base class for a message defect."""
+
+    def __init__(self, line=None):
+        if line is not None:
+            super().__init__(line)
+        self.line = line
+
+class NoBoundaryInMultipartDefect(MessageDefect):
+    """A message claimed to be a multipart but had no boundary parameter."""
+
+class StartBoundaryNotFoundDefect(MessageDefect):
+    """The claimed start boundary was never found."""
+
+class CloseBoundaryNotFoundDefect(MessageDefect):
+    """A start boundary was found, but not the corresponding close boundary."""
+
+class FirstHeaderLineIsContinuationDefect(MessageDefect):
+    """A message had a continuation line as its first header line."""
+
+class MisplacedEnvelopeHeaderDefect(MessageDefect):
+    """A 'Unix-from' header was found in the middle of a header block."""
+
+class MissingHeaderBodySeparatorDefect(MessageDefect):
+    """Found line with no leading whitespace and no colon before blank line."""
+# XXX: backward compatibility, just in case (it was never emitted).
+MalformedHeaderDefect = MissingHeaderBodySeparatorDefect
+
+class MultipartInvariantViolationDefect(MessageDefect):
+    """A message claimed to be a multipart but no subparts were found."""
+
+class InvalidMultipartContentTransferEncodingDefect(MessageDefect):
+    """An invalid content transfer encoding was set on the multipart itself."""
+
+class UndecodableBytesDefect(MessageDefect):
+    """Header contained bytes that could not be decoded"""
+
+class InvalidBase64PaddingDefect(MessageDefect):
+    """base64 encoded sequence had an incorrect length"""
+
+class InvalidBase64CharactersDefect(MessageDefect):
+    """base64 encoded sequence had characters not in base64 alphabet"""
+
+# These errors are specific to header parsing.
+
+class HeaderDefect(MessageDefect):
+    """Base class for a header defect."""
+
+    def __init__(self, *args, **kw):
+        super().__init__(*args, **kw)
+
+class InvalidHeaderDefect(HeaderDefect):
+    """Header is not valid, message gives details."""
+
+class HeaderMissingRequiredValue(HeaderDefect):
+    """A header that must have a value had none"""
+
+class NonPrintableDefect(HeaderDefect):
+    """ASCII characters outside the ascii-printable range found"""
+
+    def __init__(self, non_printables):
+        super().__init__(non_printables)
+        self.non_printables = non_printables
+
+    def __str__(self):
+        return ("the following ASCII non-printables found in header: "
+            "{}".format(self.non_printables))
+
+class ObsoleteHeaderDefect(HeaderDefect):
+    """Header uses syntax declared obsolete by RFC 5322"""
+
+class NonASCIILocalPartDefect(HeaderDefect):
+    """local_part contains non-ASCII characters"""
+    # This defect only occurs during unicode parsing, not when
+    # parsing messages decoded from binary.

+ 525 - 0
Lib/site-packages/future/backports/email/feedparser.py

@@ -0,0 +1,525 @@
+# Copyright (C) 2004-2006 Python Software Foundation
+# Authors: Baxter, Wouters and Warsaw
+# Contact: email-sig@python.org
+
+"""FeedParser - An email feed parser.
+
+The feed parser implements an interface for incrementally parsing an email
+message, line by line.  This has advantages for certain applications, such as
+those reading email messages off a socket.
+
+FeedParser.feed() is the primary interface for pushing new data into the
+parser.  It returns when there's nothing more it can do with the available
+data.  When you have no more data to push into the parser, call .close().
+This completes the parsing and returns the root message object.
+
+The other advantage of this parser is that it will never raise a parsing
+exception.  Instead, when it finds something unexpected, it adds a 'defect' to
+the current message.  Defects are just instances that live on the message
+object's .defects attribute.
+"""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+from future.builtins import object, range, super
+from future.utils import implements_iterator, PY3
+
+__all__ = ['FeedParser', 'BytesFeedParser']
+
+import re
+
+from future.backports.email import errors
+from future.backports.email import message
+from future.backports.email._policybase import compat32
+
# Line-break variants (RFC 2822 mandates CRLF; be liberal and accept bare
# CR or LF as well).  All four patterns are raw strings: the original
# non-raw '...\Z' relied on Python passing the unknown escape '\Z' through
# verbatim, which is a SyntaxWarning in modern CPython and slated to become
# an error.  The compiled regexes are identical.
NLCRE = re.compile(r'\r\n|\r|\n')            # any single line break
NLCRE_bol = re.compile(r'(\r\n|\r|\n)')      # capture a line break at the start
NLCRE_eol = re.compile(r'(\r\n|\r|\n)\Z')    # capture a trailing line break
NLCRE_crack = re.compile(r'(\r\n|\r|\n)')    # split text, keeping the breaks
# RFC 2822 $3.6.8 Optional fields.  ftext is %d33-57 / %d59-126, Any character
# except controls, SP, and ":".
headerRE = re.compile(r'^(From |[\041-\071\073-\176]{1,}:|[\t ])')
EMPTYSTRING = ''
NL = '\n'

# Sentinel returned by BufferedSubFile.readline() when the buffer is empty
# but not yet closed; always compared by identity ("is NeedMoreData").
NeedMoreData = object()
+
+
+# @implements_iterator
+class BufferedSubFile(object):
+    """A file-ish object that can have new data loaded into it.
+
+    You can also push and pop line-matching predicates onto a stack.  When the
+    current predicate matches the current line, a false EOF response
+    (i.e. empty string) is returned instead.  This lets the parser adhere to a
+    simple abstraction -- it parses until EOF closes the current message.
+    """
+    def __init__(self):
+        # The last partial line pushed into this object.
+        self._partial = ''
+        # The list of full, pushed lines, in reverse order
+        self._lines = []
+        # The stack of false-EOF checking predicates.
+        self._eofstack = []
+        # A flag indicating whether the file has been closed or not.
+        self._closed = False
+
+    def push_eof_matcher(self, pred):
+        # Install a predicate that makes readline() report a false EOF.
+        self._eofstack.append(pred)
+
+    def pop_eof_matcher(self):
+        return self._eofstack.pop()
+
+    def close(self):
+        # Don't forget any trailing partial line.
+        self._lines.append(self._partial)
+        self._partial = ''
+        self._closed = True
+
+    def readline(self):
+        # Returns '' at (true or false) EOF, the NeedMoreData sentinel when
+        # the buffer is drained but not closed, otherwise the next line.
+        if not self._lines:
+            if self._closed:
+                return ''
+            return NeedMoreData
+        # Pop the line off the stack and see if it matches the current
+        # false-EOF predicate.
+        line = self._lines.pop()
+        # RFC 2046, section 5.1.2 requires us to recognize outer level
+        # boundaries at any level of inner nesting.  Do this, but be sure it's
+        # in the order of most to least nested.
+        for ateof in self._eofstack[::-1]:
+            if ateof(line):
+                # We're at the false EOF.  But push the last line back first.
+                self._lines.append(line)
+                return ''
+        return line
+
+    def unreadline(self, line):
+        # Let the consumer push a line back into the buffer.
+        assert line is not NeedMoreData
+        self._lines.append(line)
+
+    def push(self, data):
+        """Push some new data into this object."""
+        # Handle any previous leftovers
+        data, self._partial = self._partial + data, ''
+        # Crack into lines, but preserve the newlines on the end of each
+        parts = NLCRE_crack.split(data)
+        # The *ahem* interesting behaviour of re.split when supplied grouping
+        # parentheses is that the last element of the resulting list is the
+        # data after the final RE.  In the case of a NL/CR terminated string,
+        # this is the empty string.
+        self._partial = parts.pop()
+        #GAN 29Mar09  bugs 1555570, 1721862  Confusion at 8K boundary ending with \r:
+        # is there a \n to follow later?
+        if not self._partial and parts and parts[-1].endswith('\r'):
+            # Hold back the trailing '\r' (and its line) until we know
+            # whether the next chunk starts with '\n'.
+            self._partial = parts.pop(-2)+parts.pop()
+        # parts is a list of strings, alternating between the line contents
+        # and the eol character(s).  Gather up a list of lines after
+        # re-attaching the newlines.
+        lines = []
+        for i in range(len(parts) // 2):
+            lines.append(parts[i*2] + parts[i*2+1])
+        self.pushlines(lines)
+
+    def pushlines(self, lines):
+        # Reverse and insert at the front of the lines.
+        # (self._lines is kept in reverse order so readline() can pop().)
+        self._lines[:0] = lines[::-1]
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        line = self.readline()
+        if line == '':
+            raise StopIteration
+        return line
+
+
+class FeedParser(object):
+    """A feed-style parser of email."""
+
+    def __init__(self, _factory=message.Message, **_3to2kwargs):
+        # 'policy' is emulated as a keyword-only argument (this module is a
+        # 3to2 translation), so it is popped out of **_3to2kwargs by hand.
+        if 'policy' in _3to2kwargs: policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
+        else: policy = compat32
+        # NOTE(review): because statements precede it, the string below is an
+        # ordinary expression, not the function's real __doc__.
+        """_factory is called with no arguments to create a new message obj
+
+        The policy keyword specifies a policy object that controls a number of
+        aspects of the parser's operation.  The default policy maintains
+        backward compatibility.
+
+        """
+        self._factory = _factory
+        self.policy = policy
+        try:
+            # Probe whether the factory accepts a 'policy' keyword.
+            _factory(policy=self.policy)
+            self._factory_kwds = lambda: {'policy': self.policy}
+        except TypeError:
+            # Assume this is an old-style factory
+            self._factory_kwds = lambda: {}
+        self._input = BufferedSubFile()
+        self._msgstack = []
+        # The parser is a resumable generator; _parse advances it one step.
+        if PY3:
+            self._parse = self._parsegen().__next__
+        else:
+            self._parse = self._parsegen().next
+        self._cur = None
+        self._last = None
+        self._headersonly = False
+
+    # Non-public interface for supporting Parser's headersonly flag
+    def _set_headersonly(self):
+        self._headersonly = True
+
+    def feed(self, data):
+        """Push more data into the parser."""
+        self._input.push(data)
+        self._call_parse()
+
+    def _call_parse(self):
+        # Resume the parser generator; StopIteration just means it finished.
+        try:
+            self._parse()
+        except StopIteration:
+            pass
+
+    def close(self):
+        """Parse all remaining data and return the root message object."""
+        self._input.close()
+        self._call_parse()
+        root = self._pop_message()
+        assert not self._msgstack
+        # Look for final set of defects
+        if root.get_content_maintype() == 'multipart' \
+               and not root.is_multipart():
+            defect = errors.MultipartInvariantViolationDefect()
+            self.policy.handle_defect(root, defect)
+        return root
+
+    def _new_message(self):
+        # Create a new message and push it on the stack; if we are inside a
+        # container message it also becomes that container's subpart.
+        msg = self._factory(**self._factory_kwds())
+        if self._cur and self._cur.get_content_type() == 'multipart/digest':
+            msg.set_default_type('message/rfc822')
+        if self._msgstack:
+            self._msgstack[-1].attach(msg)
+        self._msgstack.append(msg)
+        self._cur = msg
+        self._last = msg
+
+    def _pop_message(self):
+        # Pop the finished message; _cur becomes its container (or None).
+        retval = self._msgstack.pop()
+        if self._msgstack:
+            self._cur = self._msgstack[-1]
+        else:
+            self._cur = None
+        return retval
+
+    def _parsegen(self):
+        # Resumable state machine: yields NeedMoreData whenever the input
+        # buffer runs dry and picks up where it left off on the next feed().
+        # Create a new message and start by parsing headers.
+        self._new_message()
+        headers = []
+        # Collect the headers, searching for a line that doesn't match the RFC
+        # 2822 header or continuation pattern (including an empty line).
+        for line in self._input:
+            if line is NeedMoreData:
+                yield NeedMoreData
+                continue
+            if not headerRE.match(line):
+                # If we saw the RFC defined header/body separator
+                # (i.e. newline), just throw it away. Otherwise the line is
+                # part of the body so push it back.
+                if not NLCRE.match(line):
+                    defect = errors.MissingHeaderBodySeparatorDefect()
+                    self.policy.handle_defect(self._cur, defect)
+                    self._input.unreadline(line)
+                break
+            headers.append(line)
+        # Done with the headers, so parse them and figure out what we're
+        # supposed to see in the body of the message.
+        self._parse_headers(headers)
+        # Headers-only parsing is a backwards compatibility hack, which was
+        # necessary in the older parser, which could raise errors.  All
+        # remaining lines in the input are thrown into the message body.
+        if self._headersonly:
+            lines = []
+            while True:
+                line = self._input.readline()
+                if line is NeedMoreData:
+                    yield NeedMoreData
+                    continue
+                if line == '':
+                    break
+                lines.append(line)
+            self._cur.set_payload(EMPTYSTRING.join(lines))
+            return
+        if self._cur.get_content_type() == 'message/delivery-status':
+            # message/delivery-status contains blocks of headers separated by
+            # a blank line.  We'll represent each header block as a separate
+            # nested message object, but the processing is a bit different
+            # than standard message/* types because there is no body for the
+            # nested messages.  A blank line separates the subparts.
+            while True:
+                self._input.push_eof_matcher(NLCRE.match)
+                for retval in self._parsegen():
+                    if retval is NeedMoreData:
+                        yield NeedMoreData
+                        continue
+                    break
+                msg = self._pop_message()
+                # We need to pop the EOF matcher in order to tell if we're at
+                # the end of the current file, not the end of the last block
+                # of message headers.
+                self._input.pop_eof_matcher()
+                # The input stream must be sitting at the newline or at the
+                # EOF.  We want to see if we're at the end of this subpart, so
+                # first consume the blank line, then test the next line to see
+                # if we're at this subpart's EOF.
+                while True:
+                    line = self._input.readline()
+                    if line is NeedMoreData:
+                        yield NeedMoreData
+                        continue
+                    break
+                while True:
+                    line = self._input.readline()
+                    if line is NeedMoreData:
+                        yield NeedMoreData
+                        continue
+                    break
+                if line == '':
+                    break
+                # Not at EOF so this is a line we're going to need.
+                self._input.unreadline(line)
+            return
+        if self._cur.get_content_maintype() == 'message':
+            # The message claims to be a message/* type, then what follows is
+            # another RFC 2822 message.
+            for retval in self._parsegen():
+                if retval is NeedMoreData:
+                    yield NeedMoreData
+                    continue
+                break
+            self._pop_message()
+            return
+        if self._cur.get_content_maintype() == 'multipart':
+            boundary = self._cur.get_boundary()
+            if boundary is None:
+                # The message /claims/ to be a multipart but it has not
+                # defined a boundary.  That's a problem which we'll handle by
+                # reading everything until the EOF and marking the message as
+                # defective.
+                defect = errors.NoBoundaryInMultipartDefect()
+                self.policy.handle_defect(self._cur, defect)
+                lines = []
+                for line in self._input:
+                    if line is NeedMoreData:
+                        yield NeedMoreData
+                        continue
+                    lines.append(line)
+                self._cur.set_payload(EMPTYSTRING.join(lines))
+                return
+            # Make sure a valid content type was specified per RFC 2045:6.4.
+            if (self._cur.get('content-transfer-encoding', '8bit').lower()
+                    not in ('7bit', '8bit', 'binary')):
+                defect = errors.InvalidMultipartContentTransferEncodingDefect()
+                self.policy.handle_defect(self._cur, defect)
+            # Create a line match predicate which matches the inter-part
+            # boundary as well as the end-of-multipart boundary.  Don't push
+            # this onto the input stream until we've scanned past the
+            # preamble.
+            separator = '--' + boundary
+            boundaryre = re.compile(
+                '(?P<sep>' + re.escape(separator) +
+                r')(?P<end>--)?(?P<ws>[ \t]*)(?P<linesep>\r\n|\r|\n)?$')
+            capturing_preamble = True
+            preamble = []
+            linesep = False
+            close_boundary_seen = False
+            while True:
+                line = self._input.readline()
+                if line is NeedMoreData:
+                    yield NeedMoreData
+                    continue
+                if line == '':
+                    break
+                mo = boundaryre.match(line)
+                if mo:
+                    # If we're looking at the end boundary, we're done with
+                    # this multipart.  If there was a newline at the end of
+                    # the closing boundary, then we need to initialize the
+                    # epilogue with the empty string (see below).
+                    if mo.group('end'):
+                        close_boundary_seen = True
+                        linesep = mo.group('linesep')
+                        break
+                    # We saw an inter-part boundary.  Were we in the preamble?
+                    if capturing_preamble:
+                        if preamble:
+                            # According to RFC 2046, the last newline belongs
+                            # to the boundary.
+                            lastline = preamble[-1]
+                            eolmo = NLCRE_eol.search(lastline)
+                            if eolmo:
+                                preamble[-1] = lastline[:-len(eolmo.group(0))]
+                            self._cur.preamble = EMPTYSTRING.join(preamble)
+                        capturing_preamble = False
+                        self._input.unreadline(line)
+                        continue
+                    # We saw a boundary separating two parts.  Consume any
+                    # multiple boundary lines that may be following.  Our
+                    # interpretation of RFC 2046 BNF grammar does not produce
+                    # body parts within such double boundaries.
+                    while True:
+                        line = self._input.readline()
+                        if line is NeedMoreData:
+                            yield NeedMoreData
+                            continue
+                        mo = boundaryre.match(line)
+                        if not mo:
+                            self._input.unreadline(line)
+                            break
+                    # Recurse to parse this subpart; the input stream points
+                    # at the subpart's first line.
+                    self._input.push_eof_matcher(boundaryre.match)
+                    for retval in self._parsegen():
+                        if retval is NeedMoreData:
+                            yield NeedMoreData
+                            continue
+                        break
+                    # Because of RFC 2046, the newline preceding the boundary
+                    # separator actually belongs to the boundary, not the
+                    # previous subpart's payload (or epilogue if the previous
+                    # part is a multipart).
+                    if self._last.get_content_maintype() == 'multipart':
+                        epilogue = self._last.epilogue
+                        if epilogue == '':
+                            self._last.epilogue = None
+                        elif epilogue is not None:
+                            mo = NLCRE_eol.search(epilogue)
+                            if mo:
+                                end = len(mo.group(0))
+                                self._last.epilogue = epilogue[:-end]
+                    else:
+                        payload = self._last._payload
+                        if isinstance(payload, str):
+                            mo = NLCRE_eol.search(payload)
+                            if mo:
+                                payload = payload[:-len(mo.group(0))]
+                                self._last._payload = payload
+                    self._input.pop_eof_matcher()
+                    self._pop_message()
+                    # Set the multipart up for newline cleansing, which will
+                    # happen if we're in a nested multipart.
+                    self._last = self._cur
+                else:
+                    # I think we must be in the preamble
+                    assert capturing_preamble
+                    preamble.append(line)
+            # We've seen either the EOF or the end boundary.  If we're still
+            # capturing the preamble, we never saw the start boundary.  Note
+            # that as a defect and store the captured text as the payload.
+            if capturing_preamble:
+                defect = errors.StartBoundaryNotFoundDefect()
+                self.policy.handle_defect(self._cur, defect)
+                self._cur.set_payload(EMPTYSTRING.join(preamble))
+                epilogue = []
+                # NOTE(review): the loop below drains the input but never
+                # appends, so the epilogue is set to '' — matches upstream.
+                for line in self._input:
+                    if line is NeedMoreData:
+                        yield NeedMoreData
+                        continue
+                self._cur.epilogue = EMPTYSTRING.join(epilogue)
+                return
+            # If we're not processing the preamble, then we might have seen
+            # EOF without seeing that end boundary...that is also a defect.
+            if not close_boundary_seen:
+                defect = errors.CloseBoundaryNotFoundDefect()
+                self.policy.handle_defect(self._cur, defect)
+                return
+            # Everything from here to the EOF is epilogue.  If the end boundary
+            # ended in a newline, we'll need to make sure the epilogue isn't
+            # None
+            if linesep:
+                epilogue = ['']
+            else:
+                epilogue = []
+            for line in self._input:
+                if line is NeedMoreData:
+                    yield NeedMoreData
+                    continue
+                epilogue.append(line)
+            # Any CRLF at the front of the epilogue is not technically part of
+            # the epilogue.  Also, watch out for an empty string epilogue,
+            # which means a single newline.
+            if epilogue:
+                firstline = epilogue[0]
+                bolmo = NLCRE_bol.match(firstline)
+                if bolmo:
+                    epilogue[0] = firstline[len(bolmo.group(0)):]
+            self._cur.epilogue = EMPTYSTRING.join(epilogue)
+            return
+        # Otherwise, it's some non-multipart type, so the entire rest of the
+        # file contents becomes the payload.
+        lines = []
+        for line in self._input:
+            if line is NeedMoreData:
+                yield NeedMoreData
+                continue
+            lines.append(line)
+        self._cur.set_payload(EMPTYSTRING.join(lines))
+
+    def _parse_headers(self, lines):
+        # Passed a list of lines that make up the headers for the current msg
+        lastheader = ''
+        lastvalue = []
+        for lineno, line in enumerate(lines):
+            # Check for continuation
+            if line[0] in ' \t':
+                if not lastheader:
+                    # The first line of the headers was a continuation.  This
+                    # is illegal, so let's note the defect, store the illegal
+                    # line, and ignore it for purposes of headers.
+                    defect = errors.FirstHeaderLineIsContinuationDefect(line)
+                    self.policy.handle_defect(self._cur, defect)
+                    continue
+                lastvalue.append(line)
+                continue
+            if lastheader:
+                # A new field starts: flush the accumulated previous field.
+                self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
+                lastheader, lastvalue = '', []
+            # Check for envelope header, i.e. unix-from
+            if line.startswith('From '):
+                if lineno == 0:
+                    # Strip off the trailing newline
+                    mo = NLCRE_eol.search(line)
+                    if mo:
+                        line = line[:-len(mo.group(0))]
+                    self._cur.set_unixfrom(line)
+                    continue
+                elif lineno == len(lines) - 1:
+                    # Something looking like a unix-from at the end - it's
+                    # probably the first line of the body, so push back the
+                    # line and stop.
+                    self._input.unreadline(line)
+                    return
+                else:
+                    # Weirdly placed unix-from line.  Note this as a defect
+                    # and ignore it.
+                    defect = errors.MisplacedEnvelopeHeaderDefect(line)
+                    self._cur.defects.append(defect)
+                    continue
+            # Split the line on the colon separating field name from value.
+            # There will always be a colon, because if there wasn't the part of
+            # the parser that calls us would have started parsing the body.
+            i = line.find(':')
+            assert i>0, "_parse_headers fed line with no : and no leading WS"
+            lastheader = line[:i]
+            lastvalue = [line]
+        # Done with all the lines, so handle the last header.
+        if lastheader:
+            self._cur.set_raw(*self.policy.header_source_parse(lastvalue))
+
+
+class BytesFeedParser(FeedParser):
+    """Like FeedParser, but feed accepts bytes."""
+
+    def feed(self, data):
+        # surrogateescape lets arbitrary bytes round-trip through str.
+        super().feed(data.decode('ascii', 'surrogateescape'))

+ 498 - 0
Lib/site-packages/future/backports/email/generator.py

@@ -0,0 +1,498 @@
+# Copyright (C) 2001-2010 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Classes to generate plain text from a message object tree."""
+from __future__ import print_function
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+from future.builtins import super
+from future.builtins import str
+
+__all__ = ['Generator', 'DecodedGenerator', 'BytesGenerator']
+
+import re
+import sys
+import time
+import random
+import warnings
+
+from io import StringIO, BytesIO
+from future.backports.email._policybase import compat32
+from future.backports.email.header import Header
+from future.backports.email.utils import _has_surrogates
+import future.backports.email.charset as _charset
+
+UNDERSCORE = '_'
+NL = '\n'  # XXX: no longer used by the code below.
+
+# Body lines beginning 'From ' (mbox From_ mangling; see _handle_text).
+fcre = re.compile(r'^From ', re.MULTILINE)
+
+
+class Generator(object):
+    """Generates output from a Message object tree.
+
+    This basic generator writes the message to the given file object as plain
+    text.
+    """
+    #
+    # Public interface
+    #
+
+    def __init__(self, outfp, mangle_from_=True, maxheaderlen=None, **_3to2kwargs):
+        # 'policy' is emulated as a keyword-only argument (3to2 translation).
+        if 'policy' in _3to2kwargs: policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
+        else: policy = None
+        # NOTE(review): the string below follows executable statements, so it
+        # is not the function's real __doc__.
+        """Create the generator for message flattening.
+
+        outfp is the output file-like object for writing the message to.  It
+        must have a write() method.
+
+        Optional mangle_from_ is a flag that, when True (the default), escapes
+        From_ lines in the body of the message by putting a `>' in front of
+        them.
+
+        Optional maxheaderlen specifies the longest length for a non-continued
+        header.  When a header line is longer (in characters, with tabs
+        expanded to 8 spaces) than maxheaderlen, the header will split as
+        defined in the Header class.  Set maxheaderlen to zero to disable
+        header wrapping.  The default is 78, as recommended (but not required)
+        by RFC 2822.
+
+        The policy keyword specifies a policy object that controls a number of
+        aspects of the generator's operation.  The default policy maintains
+        backward compatibility.
+
+        """
+        self._fp = outfp
+        self._mangle_from_ = mangle_from_
+        self.maxheaderlen = maxheaderlen
+        self.policy = policy
+
+    def write(self, s):
+        """Write *s* to the output file object."""
+        # Just delegate to the file object
+        self._fp.write(s)
+
+    def flatten(self, msg, unixfrom=False, linesep=None):
+        r"""Print the message object tree rooted at msg to the output file
+        specified when the Generator instance was created.
+
+        unixfrom is a flag that forces the printing of a Unix From_ delimiter
+        before the first object in the message tree.  If the original message
+        has no From_ delimiter, a `standard' one is crafted.  By default, this
+        is False to inhibit the printing of any From_ delimiter.
+
+        Note that for subobjects, no From_ line is printed.
+
+        linesep specifies the characters used to indicate a new line in
+        the output.  The default value is determined by the policy.
+
+        """
+        # We use the _XXX constants for operating on data that comes directly
+        # from the msg, and _encoded_XXX constants for operating on data that
+        # has already been converted (to bytes in the BytesGenerator) and
+        # inserted into a temporary buffer.
+        policy = msg.policy if self.policy is None else self.policy
+        if linesep is not None:
+            policy = policy.clone(linesep=linesep)
+        if self.maxheaderlen is not None:
+            policy = policy.clone(max_line_length=self.maxheaderlen)
+        self._NL = policy.linesep
+        self._encoded_NL = self._encode(self._NL)
+        self._EMPTY = ''
+        self._encoded_EMTPY = self._encode('')
+        # Because we use clone (below) when we recursively process message
+        # subparts, and because clone uses the computed policy (not None),
+        # submessages will automatically get set to the computed policy when
+        # they are processed by this code.
+        old_gen_policy = self.policy
+        old_msg_policy = msg.policy
+        try:
+            self.policy = policy
+            msg.policy = policy
+            if unixfrom:
+                ufrom = msg.get_unixfrom()
+                if not ufrom:
+                    ufrom = 'From nobody ' + time.ctime(time.time())
+                self.write(ufrom + self._NL)
+            self._write(msg)
+        finally:
+            self.policy = old_gen_policy
+            msg.policy = old_msg_policy
+
+    def clone(self, fp):
+        """Clone this generator with the exact same options."""
+        # maxheaderlen=None defers to the policy (adjusted in flatten()).
+        return self.__class__(fp,
+                              self._mangle_from_,
+                              None, # Use policy setting, which we've adjusted
+                              policy=self.policy)
+
+    #
+    # Protected interface - undocumented ;/
+    #
+
+    # Note that we use 'self.write' when what we are writing is coming from
+    # the source, and self._fp.write when what we are writing is coming from a
+    # buffer (because the Bytes subclass has already had a chance to transform
+    # the data in its write method in that case).  This is an entirely
+    # pragmatic split determined by experiment; we could be more general by
+    # always using write and having the Bytes subclass write method detect when
+    # it has already transformed the input; but, since this whole thing is a
+    # hack anyway this seems good enough.
+
+    # Similarly, we have _XXX and _encoded_XXX attributes that are used on
+    # source and buffer data, respectively.
+    # Class-level str default; byte-oriented subclasses transform values
+    # through _encode() (see below).
+    _encoded_EMPTY = ''
+
+    def _new_buffer(self):
+        """Return a fresh buffer for accumulating a subpart's output."""
+        # BytesGenerator overrides this to return BytesIO.
+        return StringIO()
+
+    def _encode(self, s):
+        """Convert *s* for the output buffer (identity for str output)."""
+        # BytesGenerator overrides this to encode strings to bytes.
+        return s
+
+    def _write_lines(self, lines):
+        """Write *lines*, normalizing every line ending to self._NL."""
+        # We have to transform the line endings.
+        if not lines:
+            return
+        lines = lines.splitlines(True)
+        for line in lines[:-1]:
+            self.write(line.rstrip('\r\n'))
+            self.write(self._NL)
+        laststripped = lines[-1].rstrip('\r\n')
+        self.write(laststripped)
+        # Only restore a trailing newline if the source's last line had one.
+        if len(lines[-1]) != len(laststripped):
+            self.write(self._NL)
+
+    def _write(self, msg):
+        """Flatten *msg*: buffer the body first, then emit headers + body."""
+        # We can't write the headers yet because of the following scenario:
+        # say a multipart message includes the boundary string somewhere in
+        # its body.  We'd have to calculate the new boundary /before/ we write
+        # the headers so that we can write the correct Content-Type:
+        # parameter.
+        #
+        # The way we do this, so as to make the _handle_*() methods simpler,
+        # is to cache any subpart writes into a buffer.  The we write the
+        # headers and the buffer contents.  That way, subpart handlers can
+        # Do The Right Thing, and can still modify the Content-Type: header if
+        # necessary.
+        oldfp = self._fp
+        try:
+            self._fp = sfp = self._new_buffer()
+            self._dispatch(msg)
+        finally:
+            self._fp = oldfp
+        # Write the headers.  First we see if the message object wants to
+        # handle that itself.  If not, we'll do it generically.
+        meth = getattr(msg, '_write_headers', None)
+        if meth is None:
+            self._write_headers(msg)
+        else:
+            meth(self)
+        self._fp.write(sfp.getvalue())
+
+    def _dispatch(self, msg):
+        """Route *msg* to the most specific _handle_* method available."""
+        # Get the Content-Type: for the message, then try to dispatch to
+        # self._handle_<maintype>_<subtype>().  If there's no handler for the
+        # full MIME type, then dispatch to self._handle_<maintype>().  If
+        # that's missing too, then dispatch to self._writeBody().
+        main = msg.get_content_maintype()
+        sub = msg.get_content_subtype()
+        specific = UNDERSCORE.join((main, sub)).replace('-', '_')
+        meth = getattr(self, '_handle_' + specific, None)
+        if meth is None:
+            generic = main.replace('-', '_')
+            meth = getattr(self, '_handle_' + generic, None)
+            if meth is None:
+                meth = self._writeBody
+        meth(msg)
+
+    #
+    # Default handlers
+    #
+
+    def _write_headers(self, msg):
+        """Emit each header folded per policy, then the separating blank line."""
+        for h, v in msg.raw_items():
+            self.write(self.policy.fold(h, v))
+        # A blank line always separates headers from body
+        self.write(self._NL)
+
+    #
+    # Handlers for writing types and subtypes
+    #
+
    def _handle_text(self, msg):
        """Write a text payload.

        Raises TypeError if the payload is not a string.  If the payload
        contains surrogates (i.e. it originally came from undecodable bytes)
        and a charset is known, the payload is re-set so it gets a proper
        content-transfer-encoding before being written.
        """
        payload = msg.get_payload()
        if payload is None:
            return
        if not isinstance(payload, str):
            raise TypeError('string payload expected: %s' % type(payload))
        if _has_surrogates(msg._payload):
            charset = msg.get_param('charset')
            if charset is not None:
                # Re-encode under the declared charset; drop the stale CTE
                # header so set_payload can choose a correct one.
                del msg['content-transfer-encoding']
                msg.set_payload(payload, charset)
                payload = msg.get_payload()
        if self._mangle_from_:
            # Escape body lines starting with 'From ' (mbox delimiter).
            payload = fcre.sub('>From ', payload)
        self._write_lines(payload)

    # Default body handler
    _writeBody = _handle_text
+
    def _handle_multipart(self, msg):
        """Flatten a multipart message: subparts, boundaries, preamble, epilogue."""
        # The trick here is to write out each part separately, merge them all
        # together, and then make sure that the boundary we've chosen isn't
        # present in the payload.
        msgtexts = []
        subparts = msg.get_payload()
        if subparts is None:
            subparts = []
        elif isinstance(subparts, str):
            # e.g. a non-strict parse of a message with no starting boundary.
            self.write(subparts)
            return
        elif not isinstance(subparts, list):
            # Scalar payload
            subparts = [subparts]
        for part in subparts:
            # Flatten each subpart into its own buffer via a cloned generator.
            s = self._new_buffer()
            g = self.clone(s)
            g.flatten(part, unixfrom=False, linesep=self._NL)
            msgtexts.append(s.getvalue())
        # BAW: What about boundaries that are wrapped in double-quotes?
        boundary = msg.get_boundary()
        if not boundary:
            # Create a boundary that doesn't appear in any of the
            # message texts.
            alltext = self._encoded_NL.join(msgtexts)
            boundary = self._make_boundary(alltext)
            msg.set_boundary(boundary)
        # If there's a preamble, write it out, with a trailing CRLF
        if msg.preamble is not None:
            if self._mangle_from_:
                preamble = fcre.sub('>From ', msg.preamble)
            else:
                preamble = msg.preamble
            self._write_lines(preamble)
            self.write(self._NL)
        # dash-boundary transport-padding CRLF
        self.write('--' + boundary + self._NL)
        # body-part
        if msgtexts:
            self._fp.write(msgtexts.pop(0))
        # *encapsulation
        # --> delimiter transport-padding
        # --> CRLF body-part
        for body_part in msgtexts:
            # delimiter transport-padding CRLF
            self.write(self._NL + '--' + boundary + self._NL)
            # body-part
            self._fp.write(body_part)
        # close-delimiter transport-padding
        self.write(self._NL + '--' + boundary + '--')
        if msg.epilogue is not None:
            self.write(self._NL)
            if self._mangle_from_:
                epilogue = fcre.sub('>From ', msg.epilogue)
            else:
                epilogue = msg.epilogue
            self._write_lines(epilogue)
+
+    def _handle_multipart_signed(self, msg):
+        # The contents of signed parts has to stay unmodified in order to keep
+        # the signature intact per RFC1847 2.1, so we disable header wrapping.
+        # RDM: This isn't enough to completely preserve the part, but it helps.
+        p = self.policy
+        self.policy = p.clone(max_line_length=0)
+        try:
+            self._handle_multipart(msg)
+        finally:
+            self.policy = p
+
    def _handle_message_delivery_status(self, msg):
        """Write the header blocks of a message/delivery-status payload."""
        # We can't just write the headers directly to self's file object
        # because this will leave an extra newline between the last header
        # block and the boundary.  Sigh.
        blocks = []
        for part in msg.get_payload():
            s = self._new_buffer()
            g = self.clone(s)
            g.flatten(part, unixfrom=False, linesep=self._NL)
            text = s.getvalue()
            lines = text.split(self._encoded_NL)
            # Strip off the unnecessary trailing empty line
            if lines and lines[-1] == self._encoded_EMPTY:
                blocks.append(self._encoded_NL.join(lines[:-1]))
            else:
                blocks.append(text)
        # Now join all the blocks with an empty line.  This has the lovely
        # effect of separating each block with an empty line, but not adding
        # an extra one after the last one.
        self._fp.write(self._encoded_NL.join(blocks))
+
    def _handle_message(self, msg):
        """Write the payload of a message/rfc822 (or similar) part."""
        s = self._new_buffer()
        g = self.clone(s)
        # The payload of a message/rfc822 part should be a multipart sequence
        # of length 1.  The zeroth element of the list should be the Message
        # object for the subpart.  Extract that object, stringify it, and
        # write it out.
        # Except, it turns out, when it's a string instead, which happens when
        # and only when HeaderParser is used on a message of mime type
        # message/rfc822.  Such messages are generated by, for example,
        # Groupwise when forwarding unadorned messages.  (Issue 7970.)  So
        # in that case we just emit the string body.
        payload = msg._payload
        if isinstance(payload, list):
            g.flatten(msg.get_payload(0), unixfrom=False, linesep=self._NL)
            payload = s.getvalue()
        else:
            payload = self._encode(payload)
        self._fp.write(payload)
+
+    # This used to be a module level function; we use a classmethod for this
+    # and _compile_re so we can continue to provide the module level function
+    # for backward compatibility by doing
+    #   _make_boudary = Generator._make_boundary
+    # at the end of the module.  It *is* internal, so we could drop that...
+    @classmethod
+    def _make_boundary(cls, text=None):
+        # Craft a random boundary.  If text is given, ensure that the chosen
+        # boundary doesn't appear in the text.
+        token = random.randrange(sys.maxsize)
+        boundary = ('=' * 15) + (_fmt % token) + '=='
+        if text is None:
+            return boundary
+        b = boundary
+        counter = 0
+        while True:
+            cre = cls._compile_re('^--' + re.escape(b) + '(--)?$', re.MULTILINE)
+            if not cre.search(text):
+                break
+            b = boundary + '.' + str(counter)
+            counter += 1
+        return b
+
    @classmethod
    def _compile_re(cls, s, flags):
        # Text generator matches str patterns; BytesGenerator overrides this
        # to compile a bytes pattern for its BytesIO buffers.
        return re.compile(s, flags)
+
class BytesGenerator(Generator):
    """Generates a bytes version of a Message object tree.

    Functionally identical to the base Generator except that the output is
    bytes and not string.  When surrogates were used in the input to encode
    bytes, these are decoded back to bytes for output.  If the policy has
    cte_type set to 7bit, then the message is transformed such that the
    non-ASCII bytes are properly content transfer encoded, using the charset
    unknown-8bit.

    The outfp object must accept bytes in its write method.
    """

    # Bytes versions of this constant for use in manipulating data from
    # the BytesIO buffer.
    _encoded_EMPTY = b''

    def write(self, s):
        # surrogateescape round-trips any raw bytes that were smuggled into
        # the string as surrogate code points.
        self._fp.write(str(s).encode('ascii', 'surrogateescape'))

    def _new_buffer(self):
        # Subpart buffering happens in bytes space for this generator.
        return BytesIO()

    def _encode(self, s):
        return s.encode('ascii')

    def _write_headers(self, msg):
        # This is almost the same as the string version, except for handling
        # strings with 8bit bytes.
        for h, v in msg.raw_items():
            self._fp.write(self.policy.fold_binary(h, v))
        # A blank line always separates headers from body
        self.write(self._NL)

    def _handle_text(self, msg):
        # If the string has surrogates the original source was bytes, so
        # just write it back out.
        if msg._payload is None:
            return
        # Rewritten from `not self.policy.cte_type=='7bit'` to the direct
        # `!=` comparison; `not x == y` parses as `not (x == y)`, so the
        # behavior is identical but the intent is now explicit.
        if _has_surrogates(msg._payload) and self.policy.cte_type != '7bit':
            if self._mangle_from_:
                msg._payload = fcre.sub(">From ", msg._payload)
            self._write_lines(msg._payload)
        else:
            super(BytesGenerator,self)._handle_text(msg)

    # Default body handler
    _writeBody = _handle_text

    @classmethod
    def _compile_re(cls, s, flags):
        # Patterns are matched against bytes buffers, so compile as bytes.
        return re.compile(s.encode('ascii'), flags)
+
+
+_FMT = '[Non-text (%(type)s) part of message omitted, filename %(filename)s]'
+
class DecodedGenerator(Generator):
    """Generates a text representation of a message.

    Like the Generator base class, except that non-text parts are substituted
    with a format string representing the part.
    """
    def __init__(self, outfp, mangle_from_=True, maxheaderlen=78, fmt=None):
        """Like Generator.__init__() except that an additional optional
        argument is allowed.

        Walks through all subparts of a message.  If the subpart is of main
        type `text', then it prints the decoded payload of the subpart.

        Otherwise, fmt is a format string that is used instead of the message
        payload.  fmt is expanded with the following keywords (in
        %(keyword)s format):

        type       : Full MIME type of the non-text part
        maintype   : Main MIME type of the non-text part
        subtype    : Sub-MIME type of the non-text part
        filename   : Filename of the non-text part
        description: Description associated with the non-text part
        encoding   : Content transfer encoding of the non-text part

        The default value for fmt is None, meaning

        [Non-text (%(type)s) part of message omitted, filename %(filename)s]
        """
        Generator.__init__(self, outfp, mangle_from_, maxheaderlen)
        # Fall back to the module-level template when no format is supplied.
        self._fmt = _FMT if fmt is None else fmt

    def _dispatch(self, msg):
        for part in msg.walk():
            maintype = part.get_content_maintype()
            if maintype == 'multipart':
                # Containers are skipped; walk() visits their children.
                continue
            if maintype == 'text':
                print(part.get_payload(decode=False), file=self)
            else:
                substitutions = {
                    'type'       : part.get_content_type(),
                    'maintype'   : part.get_content_maintype(),
                    'subtype'    : part.get_content_subtype(),
                    'filename'   : part.get_filename('[no filename]'),
                    'description': part.get('Content-Description',
                                            '[no description]'),
                    'encoding'   : part.get('Content-Transfer-Encoding',
                                            '[no encoding]'),
                    }
                print(self._fmt % substitutions, file=self)
+
+
# Helper used by Generator._make_boundary
# _fmt zero-pads the random token to the full decimal width of sys.maxsize
# so every generated boundary has a uniform length.
_width = len(repr(sys.maxsize-1))
_fmt = '%%0%dd' % _width

# Backward compatibility
_make_boundary = Generator._make_boundary

+ 581 - 0
Lib/site-packages/future/backports/email/header.py

@@ -0,0 +1,581 @@
+# Copyright (C) 2002-2007 Python Software Foundation
+# Author: Ben Gertzfield, Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Header encoding and decoding functionality."""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+from future.builtins import bytes, range, str, super, zip
+
+__all__ = [
+    'Header',
+    'decode_header',
+    'make_header',
+    ]
+
+import re
+import binascii
+
+from future.backports import email
+from future.backports.email import base64mime
+from future.backports.email.errors import HeaderParseError
+import future.backports.email.charset as _charset
+
+# Helpers
+from future.backports.email.quoprimime import _max_append, header_decode
+
Charset = _charset.Charset

# Line and whitespace constants used throughout the folding logic.
NL = '\n'
SPACE = ' '
BSPACE = b' '
SPACE8 = ' ' * 8
EMPTYSTRING = ''
MAXLINELEN = 78  # RFC 2822 recommended maximum line length
FWS = ' \t'      # folding whitespace characters

# Pre-built charsets used as defaults/fallbacks by Header.
USASCII = Charset('us-ascii')
UTF8 = Charset('utf-8')

# Match encoded-word strings in the form =?charset?q?Hello_World?=
ecre = re.compile(r'''
  =\?                   # literal =?
  (?P<charset>[^?]*?)   # non-greedy up to the next ? is the charset
  \?                    # literal ?
  (?P<encoding>[qb])    # either a "q" or a "b", case insensitive
  \?                    # literal ?
  (?P<encoded>.*?)      # non-greedy up to the next ?= is the encoded string
  \?=                   # literal ?=
  ''', re.VERBOSE | re.IGNORECASE | re.MULTILINE)

# Field name regexp, including trailing colon, but not separating whitespace,
# according to RFC 2822.  Character range is from tilde to exclamation mark.
# For use with .match()
fcre = re.compile(r'[\041-\176]+:$')

# Find a header embedded in a putative header value.  Used to check for
# header injection attack.
_embeded_header = re.compile(r'\n[^ \t]+:')
+
+
def decode_header(header):
    """Decode a message header value without converting charset.

    Returns a list of (string, charset) pairs containing each of the decoded
    parts of the header.  Charset is None for non-encoded parts of the header,
    otherwise a lower-case string containing the name of the character set
    specified in the encoded string.

    header may be a string that may or may not contain RFC2047 encoded words,
    or it may be a Header object.

    An email.errors.HeaderParseError may be raised when certain decoding error
    occurs (e.g. a base64 decoding exception).
    """
    # If it is a Header object, we can just return the encoded chunks.
    if hasattr(header, '_chunks'):
        return [(_charset._encode(string, str(charset)), str(charset))
                    for string, charset in header._chunks]
    # If no encoding, just return the header with no charset.
    if not ecre.search(header):
        return [(header, None)]
    # First step is to parse all the encoded parts into triplets of the form
    # (encoded_string, encoding, charset).  For unencoded strings, the last
    # two parts will be None.
    words = []
    for line in header.splitlines():
        parts = ecre.split(line)
        first = True
        while parts:
            unencoded = parts.pop(0)
            if first:
                unencoded = unencoded.lstrip()
                first = False
            if unencoded:
                words.append((unencoded, None, None))
            if parts:
                charset = parts.pop(0).lower()
                encoding = parts.pop(0).lower()
                encoded = parts.pop(0)
                words.append((encoded, encoding, charset))
    # Now loop over words and remove words that consist of whitespace
    # between two encoded strings.
    # (A stray debug `import sys` was removed here; it was never used.)
    droplist = []
    for n, w in enumerate(words):
        if n>1 and w[1] and words[n-2][1] and words[n-1][0].isspace():
            droplist.append(n-1)
    for d in reversed(droplist):
        del words[d]

    # The next step is to decode each encoded word by applying the reverse
    # base64 or quopri transformation.  decoded_words is now a list of the
    # form (decoded_word, charset).
    decoded_words = []
    for encoded_string, encoding, charset in words:
        if encoding is None:
            # This is an unencoded word.
            decoded_words.append((encoded_string, charset))
        elif encoding == 'q':
            word = header_decode(encoded_string)
            decoded_words.append((word, charset))
        elif encoding == 'b':
            paderr = len(encoded_string) % 4   # Postel's law: add missing padding
            if paderr:
                encoded_string += '==='[:4 - paderr]
            try:
                word = base64mime.decode(encoded_string)
            except binascii.Error:
                raise HeaderParseError('Base64 decoding error')
            else:
                decoded_words.append((word, charset))
        else:
            raise AssertionError('Unexpected encoding: ' + encoding)
    # Now convert all words to bytes and collapse consecutive runs of
    # similarly encoded words.
    collapsed = []
    last_word = last_charset = None
    for word, charset in decoded_words:
        if isinstance(word, str):
            word = bytes(word, 'raw-unicode-escape')
        if last_word is None:
            last_word = word
            last_charset = charset
        elif charset != last_charset:
            collapsed.append((last_word, last_charset))
            last_word = word
            last_charset = charset
        elif last_charset is None:
            last_word += BSPACE + word
        else:
            last_word += word
    collapsed.append((last_word, last_charset))
    return collapsed
+
+
def make_header(decoded_seq, maxlinelen=None, header_name=None,
                continuation_ws=' '):
    """Create a Header from a sequence of pairs as returned by decode_header()

    decode_header() takes a header value string and returns a sequence of
    pairs of the format (decoded_string, charset) where charset is the string
    name of the character set.

    This function takes one of those sequence of pairs and returns a Header
    instance.  Optional maxlinelen, header_name, and continuation_ws are as in
    the Header constructor.
    """
    header = Header(maxlinelen=maxlinelen, header_name=header_name,
                    continuation_ws=continuation_ws)
    for string, charset in decoded_seq:
        # None means us-ascii but we can simply pass it on to header.append()
        if charset is not None and not isinstance(charset, Charset):
            charset = Charset(charset)
        header.append(string, charset)
    return header
+
+
class Header(object):
    """A MIME-compliant header value that can span multiple character sets.

    Text is stored as a list of (string, Charset) chunks; str() returns the
    decoded value, and encode() produces the RFC 2047 folded/encoded form.
    """

    def __init__(self, s=None, charset=None,
                 maxlinelen=None, header_name=None,
                 continuation_ws=' ', errors='strict'):
        """Create a MIME-compliant header that can contain many character sets.

        Optional s is the initial header value.  If None, the initial header
        value is not set.  You can later append to the header with .append()
        method calls.  s may be a byte string or a Unicode string, but see the
        .append() documentation for semantics.

        Optional charset serves two purposes: it has the same meaning as the
        charset argument to the .append() method.  It also sets the default
        character set for all subsequent .append() calls that omit the charset
        argument.  If charset is not provided in the constructor, the us-ascii
        charset is used both as s's initial charset and as the default for
        subsequent .append() calls.

        The maximum line length can be specified explicitly via maxlinelen. For
        splitting the first line to a shorter value (to account for the field
        header which isn't included in s, e.g. `Subject') pass in the name of
        the field in header_name.  The default maxlinelen is 78 as recommended
        by RFC 2822.

        continuation_ws must be RFC 2822 compliant folding whitespace (usually
        either a space or a hard tab) which will be prepended to continuation
        lines.

        errors is passed through to the .append() call.
        """
        if charset is None:
            charset = USASCII
        elif not isinstance(charset, Charset):
            charset = Charset(charset)
        self._charset = charset
        self._continuation_ws = continuation_ws
        self._chunks = []
        if s is not None:
            self.append(s, charset, errors)
        if maxlinelen is None:
            maxlinelen = MAXLINELEN
        self._maxlinelen = maxlinelen
        if header_name is None:
            self._headerlen = 0
        else:
            # Take the separating colon and space into account.
            self._headerlen = len(header_name) + 2

    def __str__(self):
        """Return the string value of the header."""
        self._normalize()
        uchunks = []
        lastcs = None
        lastspace = None
        for string, charset in self._chunks:
            # We must preserve spaces between encoded and non-encoded word
            # boundaries, which means for us we need to add a space when we go
            # from a charset to None/us-ascii, or from None/us-ascii to a
            # charset.  Only do this for the second and subsequent chunks.
            # Don't add a space if the None/us-ascii string already has
            # a space (trailing or leading depending on transition)
            nextcs = charset
            if nextcs == _charset.UNKNOWN8BIT:
                original_bytes = string.encode('ascii', 'surrogateescape')
                string = original_bytes.decode('ascii', 'replace')
            if uchunks:
                hasspace = string and self._nonctext(string[0])
                if lastcs not in (None, 'us-ascii'):
                    if nextcs in (None, 'us-ascii') and not hasspace:
                        uchunks.append(SPACE)
                        nextcs = None
                elif nextcs not in (None, 'us-ascii') and not lastspace:
                    uchunks.append(SPACE)
            lastspace = string and self._nonctext(string[-1])
            lastcs = nextcs
            uchunks.append(string)
        return EMPTYSTRING.join(uchunks)

    # Rich comparison operators for equality only.  BAW: does it make sense to
    # have or explicitly disable <, <=, >, >= operators?
    def __eq__(self, other):
        # other may be a Header or a string.  Both are fine so coerce
        # ourselves to a unicode (of the unencoded header value), swap the
        # args and do another comparison.
        return other == str(self)

    def __ne__(self, other):
        return not self == other

    def append(self, s, charset=None, errors='strict'):
        """Append a string to the MIME header.

        Optional charset, if given, should be a Charset instance or the name
        of a character set (which will be converted to a Charset instance).  A
        value of None (the default) means that the charset given in the
        constructor is used.

        s may be a byte string or a Unicode string.  If it is a byte string
        (i.e. isinstance(s, str) is false), then charset is the encoding of
        that byte string, and a UnicodeError will be raised if the string
        cannot be decoded with that charset.  If s is a Unicode string, then
        charset is a hint specifying the character set of the characters in
        the string.  In either case, when producing an RFC 2822 compliant
        header using RFC 2047 rules, the string will be encoded using the
        output codec of the charset.  If the string cannot be encoded to the
        output codec, a UnicodeError will be raised.

        Optional `errors' is passed as the errors argument to the decode
        call if s is a byte string.
        """
        if charset is None:
            charset = self._charset
        elif not isinstance(charset, Charset):
            charset = Charset(charset)
        if not isinstance(s, str):
            input_charset = charset.input_codec or 'us-ascii'
            if input_charset == _charset.UNKNOWN8BIT:
                s = s.decode('us-ascii', 'surrogateescape')
            else:
                s = s.decode(input_charset, errors)
        # Ensure that the bytes we're storing can be decoded to the output
        # character set, otherwise an early error is raised.
        output_charset = charset.output_codec or 'us-ascii'
        if output_charset != _charset.UNKNOWN8BIT:
            try:
                s.encode(output_charset, errors)
            except UnicodeEncodeError:
                if output_charset!='us-ascii':
                    raise
                # Silently upgrade pure us-ascii headers that turn out to
                # contain non-ASCII characters.
                charset = UTF8
        self._chunks.append((s, charset))

    def _nonctext(self, s):
        """True if string s is not a ctext character of RFC822.
        """
        return s.isspace() or s in ('(', ')', '\\')

    def encode(self, splitchars=';, \t', maxlinelen=None, linesep='\n'):
        r"""Encode a message header into an RFC-compliant format.

        There are many issues involved in converting a given string for use in
        an email header.  Only certain character sets are readable in most
        email clients, and as header strings can only contain a subset of
        7-bit ASCII, care must be taken to properly convert and encode (with
        Base64 or quoted-printable) header strings.  In addition, there is a
        75-character length limit on any given encoded header field, so
        line-wrapping must be performed, even with double-byte character sets.

        Optional maxlinelen specifies the maximum length of each generated
        line, exclusive of the linesep string.  Individual lines may be longer
        than maxlinelen if a folding point cannot be found.  The first line
        will be shorter by the length of the header name plus ": " if a header
        name was specified at Header construction time.  The default value for
        maxlinelen is determined at header construction time.

        Optional splitchars is a string containing characters which should be
        given extra weight by the splitting algorithm during normal header
        wrapping.  This is in very rough support of RFC 2822's `higher level
        syntactic breaks':  split points preceded by a splitchar are preferred
        during line splitting, with the characters preferred in the order in
        which they appear in the string.  Space and tab may be included in the
        string to indicate whether preference should be given to one over the
        other as a split point when other split chars do not appear in the line
        being split.  Splitchars does not affect RFC 2047 encoded lines.

        Optional linesep is a string to be used to separate the lines of
        the value.  The default value is the most useful for typical
        Python applications, but it can be set to \r\n to produce RFC-compliant
        line separators when needed.
        """
        self._normalize()
        if maxlinelen is None:
            maxlinelen = self._maxlinelen
        # A maxlinelen of 0 means don't wrap.  For all practical purposes,
        # choosing a huge number here accomplishes that and makes the
        # _ValueFormatter algorithm much simpler.
        if maxlinelen == 0:
            maxlinelen = 1000000
        formatter = _ValueFormatter(self._headerlen, maxlinelen,
                                    self._continuation_ws, splitchars)
        lastcs = None
        hasspace = lastspace = None
        for string, charset in self._chunks:
            if hasspace is not None:
                hasspace = string and self._nonctext(string[0])
                # (A dead per-iteration `import sys` debug leftover was
                # removed from this loop; it was never used.)
                if lastcs not in (None, 'us-ascii'):
                    if not hasspace or charset not in (None, 'us-ascii'):
                        formatter.add_transition()
                elif charset not in (None, 'us-ascii') and not lastspace:
                    formatter.add_transition()
            lastspace = string and self._nonctext(string[-1])
            lastcs = charset
            hasspace = False
            lines = string.splitlines()
            if lines:
                formatter.feed('', lines[0], charset)
            else:
                formatter.feed('', '', charset)
            for line in lines[1:]:
                formatter.newline()
                if charset.header_encoding is not None:
                    formatter.feed(self._continuation_ws, ' ' + line.lstrip(),
                                   charset)
                else:
                    sline = line.lstrip()
                    fws = line[:len(line)-len(sline)]
                    formatter.feed(fws, sline, charset)
            if len(lines) > 1:
                formatter.newline()
        if self._chunks:
            formatter.add_transition()
        value = formatter._str(linesep)
        if _embeded_header.search(value):
            raise HeaderParseError("header value appears to contain "
                "an embedded header: {!r}".format(value))
        return value

    def _normalize(self):
        # Step 1: Normalize the chunks so that all runs of identical charsets
        # get collapsed into a single unicode string.
        chunks = []
        last_charset = None
        last_chunk = []
        for string, charset in self._chunks:
            if charset == last_charset:
                last_chunk.append(string)
            else:
                if last_charset is not None:
                    chunks.append((SPACE.join(last_chunk), last_charset))
                last_chunk = [string]
                last_charset = charset
        if last_chunk:
            chunks.append((SPACE.join(last_chunk), last_charset))
        self._chunks = chunks
+
+
+class _ValueFormatter(object):
    def __init__(self, headerlen, maxlen, continuation_ws, splitchars):
        self._maxlen = maxlen                    # max length of any output line
        self._continuation_ws = continuation_ws  # prefix for folded lines
        self._continuation_ws_len = len(continuation_ws)
        self._splitchars = splitchars            # preferred fold points, in order
        self._lines = []                         # completed output lines
        # Accumulates the line being built; headerlen accounts for the
        # "Name: " prefix that precedes the first line.
        self._current_line = _Accumulator(headerlen)
+
    def _str(self, linesep):
        # Flush the pending partial line, then join everything with the
        # requested line separator.
        self.newline()
        return linesep.join(self._lines)
+
    def __str__(self):
        # Default string form uses '\n' line separators.
        return self._str(NL)
+
    def newline(self):
        # Discard a trailing transition marker (' ', '') before committing
        # the line; any other pending (fws, string) pair is pushed back.
        end_of_line = self._current_line.pop()
        if end_of_line != (' ', ''):
            self._current_line.push(*end_of_line)
        if len(self._current_line) > 0:
            if self._current_line.is_onlyws():
                # Whitespace-only content is appended to the previous line
                # rather than emitted as a line of its own.
                self._lines[-1] += str(self._current_line)
            else:
                self._lines.append(str(self._current_line))
        self._current_line.reset()
+
    def add_transition(self):
        # Record a potential space between differently-encoded chunks;
        # newline() drops it if it ends up trailing.
        self._current_line.push(' ', '')
+
    def feed(self, fws, string, charset):
        """Feed one (folding-ws, string, charset) chunk into the formatter."""
        # If the charset has no header encoding (i.e. it is an ASCII encoding)
        # then we must split the header at the "highest level syntactic break"
        # possible. Note that we don't have a lot of smarts about field
        # syntax; we just try to break on semi-colons, then commas, then
        # whitespace.  Eventually, this should be pluggable.
        if charset.header_encoding is None:
            self._ascii_split(fws, string, self._splitchars)
            return
        # Otherwise, we're doing either a Base64 or a quoted-printable
        # encoding which means we don't need to split the line on syntactic
        # breaks.  We can basically just find enough characters to fit on the
        # current line, minus the RFC 2047 chrome.  What makes this trickier
        # though is that we have to split at octet boundaries, not character
        # boundaries but it's only safe to split at character boundaries so at
        # best we can only get close.
        encoded_lines = charset.header_encode_lines(string, self._maxlengths())
        # The first element extends the current line, but if it's None then
        # nothing more fit on the current line so start a new line.
        try:
            first_line = encoded_lines.pop(0)
        except IndexError:
            # There are no encoded lines, so we're done.
            return
        if first_line is not None:
            self._append_chunk(fws, first_line)
        try:
            last_line = encoded_lines.pop()
        except IndexError:
            # There was only one line.
            return
        self.newline()
        self._current_line.push(self._continuation_ws, last_line)
        # Everything else are full lines in themselves.
        for line in encoded_lines:
            self._lines.append(self._continuation_ws + line)
+
+    def _maxlengths(self):
+        # The first line's length.
+        yield self._maxlen - len(self._current_line)
+        while True:
+            yield self._maxlen - self._continuation_ws_len
+
+    def _ascii_split(self, fws, string, splitchars):
+        # The RFC 2822 header folding algorithm is simple in principle but
+        # complex in practice.  Lines may be folded any place where "folding
+        # white space" appears by inserting a linesep character in front of the
+        # FWS.  The complication is that not all spaces or tabs qualify as FWS,
+        # and we are also supposed to prefer to break at "higher level
+        # syntactic breaks".  We can't do either of these without intimate
+        # knowledge of the structure of structured headers, which we don't have
+        # here.  So the best we can do here is prefer to break at the specified
+        # splitchars, and hope that we don't choose any spaces or tabs that
+        # aren't legal FWS.  (This is at least better than the old algorithm,
+        # where we would sometimes *introduce* FWS after a splitchar, or the
+        # algorithm before that, where we would turn all white space runs into
+        # single spaces or tabs.)
+        parts = re.split("(["+FWS+"]+)", fws+string)
+        if parts[0]:
+            parts[:0] = ['']
+        else:
+            parts.pop(0)
+        for fws, part in zip(*[iter(parts)]*2):
+            self._append_chunk(fws, part)
+
+    def _append_chunk(self, fws, string):
+        self._current_line.push(fws, string)
+        if len(self._current_line) > self._maxlen:
+            # Find the best split point, working backward from the end.
+            # There might be none, on a long first line.
+            for ch in self._splitchars:
+                for i in range(self._current_line.part_count()-1, 0, -1):
+                    if ch.isspace():
+                        fws = self._current_line[i][0]
+                        if fws and fws[0]==ch:
+                            break
+                    prevpart = self._current_line[i-1][1]
+                    if prevpart and prevpart[-1]==ch:
+                        break
+                else:
+                    continue
+                break
+            else:
+                fws, part = self._current_line.pop()
+                if self._current_line._initial_size > 0:
+                    # There will be a header, so leave it on a line by itself.
+                    self.newline()
+                    if not fws:
+                        # We don't use continuation_ws here because the whitespace
+                        # after a header should always be a space.
+                        fws = ' '
+                self._current_line.push(fws, part)
+                return
+            remainder = self._current_line.pop_from(i)
+            self._lines.append(str(self._current_line))
+            self._current_line.reset(remainder)
+
+
+class _Accumulator(list):
+
+    def __init__(self, initial_size=0):
+        self._initial_size = initial_size
+        super().__init__()
+
+    def push(self, fws, string):
+        self.append((fws, string))
+
+    def pop_from(self, i=0):
+        popped = self[i:]
+        self[i:] = []
+        return popped
+
+    def pop(self):
+        if self.part_count()==0:
+            return ('', '')
+        return super().pop()
+
+    def __len__(self):
+        return sum((len(fws)+len(part) for fws, part in self),
+                   self._initial_size)
+
+    def __str__(self):
+        return EMPTYSTRING.join((EMPTYSTRING.join((fws, part))
+                                for fws, part in self))
+
+    def reset(self, startval=None):
+        if startval is None:
+            startval = []
+        self[:] = startval
+        self._initial_size = 0
+
+    def is_onlyws(self):
+        return self._initial_size==0 and (not self or str(self).isspace())
+
+    def part_count(self):
+        return super().__len__()

+ 592 - 0
Lib/site-packages/future/backports/email/headerregistry.py

@@ -0,0 +1,592 @@
+"""Representing and manipulating email headers via custom objects.
+
+This module provides an implementation of the HeaderRegistry API.
+The implementation is designed to flexibly follow RFC5322 rules.
+
+Eventually HeaderRegistry will be a public API, but it isn't yet,
+and will probably change some before that happens.
+
+"""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+from future.builtins import super
+from future.builtins import str
+from future.utils import text_to_native_str
+from future.backports.email import utils
+from future.backports.email import errors
+from future.backports.email import _header_value_parser as parser
+
+class Address(object):
+
+    def __init__(self, display_name='', username='', domain='', addr_spec=None):
+        """Create an object representing a full email address.
+
+        An address can have a 'display_name', a 'username', and a 'domain'.  In
+        addition to specifying the username and domain separately, they may be
+        specified together by using the addr_spec keyword *instead of* the
+        username and domain keywords.  If an addr_spec string is specified it
+        must be properly quoted according to RFC 5322 rules; an error will be
+        raised if it is not.
+
+        An Address object has display_name, username, domain, and addr_spec
+        attributes, all of which are read-only.  The addr_spec and the string
+        value of the object are both quoted according to RFC5322 rules, but
+        without any Content Transfer Encoding.
+
+        """
+        # This clause with its potential 'raise' may only happen when an
+        # application program creates an Address object using an addr_spec
+        # keyword.  The email library code itself must always supply username
+        # and domain.
+        if addr_spec is not None:
+            if username or domain:
+                raise TypeError("addrspec specified when username and/or "
+                                "domain also specified")
+            a_s, rest = parser.get_addr_spec(addr_spec)
+            if rest:
+                raise ValueError("Invalid addr_spec; only '{}' "
+                                 "could be parsed from '{}'".format(
+                                    a_s, addr_spec))
+            if a_s.all_defects:
+                raise a_s.all_defects[0]
+            username = a_s.local_part
+            domain = a_s.domain
+        self._display_name = display_name
+        self._username = username
+        self._domain = domain
+
+    @property
+    def display_name(self):
+        return self._display_name
+
+    @property
+    def username(self):
+        return self._username
+
+    @property
+    def domain(self):
+        return self._domain
+
+    @property
+    def addr_spec(self):
+        """The addr_spec (username@domain) portion of the address, quoted
+        according to RFC 5322 rules, but with no Content Transfer Encoding.
+        """
+        nameset = set(self.username)
+        if len(nameset) > len(nameset-parser.DOT_ATOM_ENDS):
+            lp = parser.quote_string(self.username)
+        else:
+            lp = self.username
+        if self.domain:
+            return lp + '@' + self.domain
+        if not lp:
+            return '<>'
+        return lp
+
+    def __repr__(self):
+        return "Address(display_name={!r}, username={!r}, domain={!r})".format(
+                        self.display_name, self.username, self.domain)
+
+    def __str__(self):
+        nameset = set(self.display_name)
+        if len(nameset) > len(nameset-parser.SPECIALS):
+            disp = parser.quote_string(self.display_name)
+        else:
+            disp = self.display_name
+        if disp:
+            addr_spec = '' if self.addr_spec=='<>' else self.addr_spec
+            return "{} <{}>".format(disp, addr_spec)
+        return self.addr_spec
+
+    def __eq__(self, other):
+        if type(other) != type(self):
+            return False
+        return (self.display_name == other.display_name and
+                self.username == other.username and
+                self.domain == other.domain)
+
+
+class Group(object):
+
+    def __init__(self, display_name=None, addresses=None):
+        """Create an object representing an address group.
+
+        An address group consists of a display_name followed by a colon and a
+        list of addresses (see Address) terminated by a semi-colon.  The Group
+        is created by specifying a display_name and a possibly empty list of
+        Address objects.  A Group can also be used to represent a single
+        address that is not in a group, which is convenient when manipulating
+        lists that are a combination of Groups and individual Addresses.  In
+        this case the display_name should be set to None.  In particular, the
+        string representation of a Group whose display_name is None is the same
+        as the Address object, if there is one and only one Address object in
+        the addresses list.
+
+        """
+        self._display_name = display_name
+        self._addresses = tuple(addresses) if addresses else tuple()
+
+    @property
+    def display_name(self):
+        return self._display_name
+
+    @property
+    def addresses(self):
+        return self._addresses
+
+    def __repr__(self):
+        return "Group(display_name={!r}, addresses={!r}".format(
+                 self.display_name, self.addresses)
+
+    def __str__(self):
+        if self.display_name is None and len(self.addresses)==1:
+            return str(self.addresses[0])
+        disp = self.display_name
+        if disp is not None:
+            nameset = set(disp)
+            if len(nameset) > len(nameset-parser.SPECIALS):
+                disp = parser.quote_string(disp)
+        adrstr = ", ".join(str(x) for x in self.addresses)
+        adrstr = ' ' + adrstr if adrstr else adrstr
+        return "{}:{};".format(disp, adrstr)
+
+    def __eq__(self, other):
+        if type(other) != type(self):
+            return False
+        return (self.display_name == other.display_name and
+                self.addresses == other.addresses)
+
+
+# Header Classes #
+
+class BaseHeader(str):
+
+    """Base class for message headers.
+
+    Implements generic behavior and provides tools for subclasses.
+
+    A subclass must define a classmethod named 'parse' that takes an unfolded
+    value string and a dictionary as its arguments.  The dictionary will
+    contain one key, 'defects', initialized to an empty list.  After the call
+    the dictionary must contain two additional keys: parse_tree, set to the
+    parse tree obtained from parsing the header, and 'decoded', set to the
+    string value of the idealized representation of the data from the value.
+    (That is, encoded words are decoded, and values that have canonical
+    representations are so represented.)
+
+    The defects key is intended to collect parsing defects, which the message
+    parser will subsequently dispose of as appropriate.  The parser should not,
+    insofar as practical, raise any errors.  Defects should be added to the
+    list instead.  The standard header parsers register defects for RFC
+    compliance issues, for obsolete RFC syntax, and for unrecoverable parsing
+    errors.
+
+    The parse method may add additional keys to the dictionary.  In this case
+    the subclass must define an 'init' method, which will be passed the
+    dictionary as its keyword arguments.  The method should use (usually by
+    setting them as the value of similarly named attributes) and remove all the
+    extra keys added by its parse method, and then use super to call its parent
+    class with the remaining arguments and keywords.
+
+    The subclass should also make sure that a 'max_count' attribute is defined
+    that is either None or 1. XXX: need to better define this API.
+
+    """
+
+    def __new__(cls, name, value):
+        kwds = {'defects': []}
+        cls.parse(value, kwds)
+        if utils._has_surrogates(kwds['decoded']):
+            kwds['decoded'] = utils._sanitize(kwds['decoded'])
+        self = str.__new__(cls, kwds['decoded'])
+        # del kwds['decoded']
+        self.init(name, **kwds)
+        return self
+
+    def init(self, name, **_3to2kwargs):
+        defects = _3to2kwargs['defects']; del _3to2kwargs['defects']
+        parse_tree = _3to2kwargs['parse_tree']; del _3to2kwargs['parse_tree']
+        self._name = name
+        self._parse_tree = parse_tree
+        self._defects = defects
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def defects(self):
+        return tuple(self._defects)
+
+    def __reduce__(self):
+        return (
+            _reconstruct_header,
+            (
+                self.__class__.__name__,
+                self.__class__.__bases__,
+                str(self),
+            ),
+            self.__dict__)
+
+    @classmethod
+    def _reconstruct(cls, value):
+        return str.__new__(cls, value)
+
+    def fold(self, **_3to2kwargs):
+        policy = _3to2kwargs['policy']; del _3to2kwargs['policy']
+        """Fold header according to policy.
+
+        The parsed representation of the header is folded according to
+        RFC5322 rules, as modified by the policy.  If the parse tree
+        contains surrogateescaped bytes, the bytes are CTE encoded using
+        the charset 'unknown-8bit'.
+
+        Any non-ASCII characters in the parse tree are CTE encoded using
+        charset utf-8. XXX: make this a policy setting.
+
+        The returned value is an ASCII-only string possibly containing linesep
+        characters, and ending with a linesep character.  The string includes
+        the header name and the ': ' separator.
+
+        """
+        # At some point we need to only put fws here if it was in the source.
+        header = parser.Header([
+            parser.HeaderLabel([
+                parser.ValueTerminal(self.name, 'header-name'),
+                parser.ValueTerminal(':', 'header-sep')]),
+            parser.CFWSList([parser.WhiteSpaceTerminal(' ', 'fws')]),
+                             self._parse_tree])
+        return header.fold(policy=policy)
+
+
+def _reconstruct_header(cls_name, bases, value):
+    return type(text_to_native_str(cls_name), bases, {})._reconstruct(value)
+
+
+class UnstructuredHeader(object):
+
+    max_count = None
+    value_parser = staticmethod(parser.get_unstructured)
+
+    @classmethod
+    def parse(cls, value, kwds):
+        kwds['parse_tree'] = cls.value_parser(value)
+        kwds['decoded'] = str(kwds['parse_tree'])
+
+
+class UniqueUnstructuredHeader(UnstructuredHeader):
+
+    max_count = 1
+
+
+class DateHeader(object):
+
+    """Header whose value consists of a single timestamp.
+
+    Provides an additional attribute, datetime, which is either an aware
+    datetime using a timezone, or a naive datetime if the timezone
+    in the input string is -0000.  Also accepts a datetime as input.
+    The 'value' attribute is the normalized form of the timestamp,
+    which means it is the output of format_datetime on the datetime.
+    """
+
+    max_count = None
+
+    # This is used only for folding, not for creating 'decoded'.
+    value_parser = staticmethod(parser.get_unstructured)
+
+    @classmethod
+    def parse(cls, value, kwds):
+        if not value:
+            kwds['defects'].append(errors.HeaderMissingRequiredValue())
+            kwds['datetime'] = None
+            kwds['decoded'] = ''
+            kwds['parse_tree'] = parser.TokenList()
+            return
+        if isinstance(value, str):
+            value = utils.parsedate_to_datetime(value)
+        kwds['datetime'] = value
+        kwds['decoded'] = utils.format_datetime(kwds['datetime'])
+        kwds['parse_tree'] = cls.value_parser(kwds['decoded'])
+
+    def init(self, *args, **kw):
+        self._datetime = kw.pop('datetime')
+        super().init(*args, **kw)
+
+    @property
+    def datetime(self):
+        return self._datetime
+
+
+class UniqueDateHeader(DateHeader):
+
+    max_count = 1
+
+
+class AddressHeader(object):
+
+    max_count = None
+
+    @staticmethod
+    def value_parser(value):
+        address_list, value = parser.get_address_list(value)
+        assert not value, 'this should not happen'
+        return address_list
+
+    @classmethod
+    def parse(cls, value, kwds):
+        if isinstance(value, str):
+            # We are translating here from the RFC language (address/mailbox)
+            # to our API language (group/address).
+            kwds['parse_tree'] = address_list = cls.value_parser(value)
+            groups = []
+            for addr in address_list.addresses:
+                groups.append(Group(addr.display_name,
+                                    [Address(mb.display_name or '',
+                                             mb.local_part or '',
+                                             mb.domain or '')
+                                     for mb in addr.all_mailboxes]))
+            defects = list(address_list.all_defects)
+        else:
+            # Assume it is Address/Group stuff
+            if not hasattr(value, '__iter__'):
+                value = [value]
+            groups = [Group(None, [item]) if not hasattr(item, 'addresses')
+                                          else item
+                                    for item in value]
+            defects = []
+        kwds['groups'] = groups
+        kwds['defects'] = defects
+        kwds['decoded'] = ', '.join([str(item) for item in groups])
+        if 'parse_tree' not in kwds:
+            kwds['parse_tree'] = cls.value_parser(kwds['decoded'])
+
+    def init(self, *args, **kw):
+        self._groups = tuple(kw.pop('groups'))
+        self._addresses = None
+        super().init(*args, **kw)
+
+    @property
+    def groups(self):
+        return self._groups
+
+    @property
+    def addresses(self):
+        if self._addresses is None:
+            self._addresses = tuple([address for group in self._groups
+                                             for address in group.addresses])
+        return self._addresses
+
+
+class UniqueAddressHeader(AddressHeader):
+
+    max_count = 1
+
+
+class SingleAddressHeader(AddressHeader):
+
+    @property
+    def address(self):
+        if len(self.addresses)!=1:
+            raise ValueError(("value of single address header {} is not "
+                "a single address").format(self.name))
+        return self.addresses[0]
+
+
+class UniqueSingleAddressHeader(SingleAddressHeader):
+
+    max_count = 1
+
+
+class MIMEVersionHeader(object):
+
+    max_count = 1
+
+    value_parser = staticmethod(parser.parse_mime_version)
+
+    @classmethod
+    def parse(cls, value, kwds):
+        kwds['parse_tree'] = parse_tree = cls.value_parser(value)
+        kwds['decoded'] = str(parse_tree)
+        kwds['defects'].extend(parse_tree.all_defects)
+        kwds['major'] = None if parse_tree.minor is None else parse_tree.major
+        kwds['minor'] = parse_tree.minor
+        if parse_tree.minor is not None:
+            kwds['version'] = '{}.{}'.format(kwds['major'], kwds['minor'])
+        else:
+            kwds['version'] = None
+
+    def init(self, *args, **kw):
+        self._version = kw.pop('version')
+        self._major = kw.pop('major')
+        self._minor = kw.pop('minor')
+        super().init(*args, **kw)
+
+    @property
+    def major(self):
+        return self._major
+
+    @property
+    def minor(self):
+        return self._minor
+
+    @property
+    def version(self):
+        return self._version
+
+
+class ParameterizedMIMEHeader(object):
+
+    # Mixin that handles the params dict.  Must be subclassed and
+    # a property value_parser for the specific header provided.
+
+    max_count = 1
+
+    @classmethod
+    def parse(cls, value, kwds):
+        kwds['parse_tree'] = parse_tree = cls.value_parser(value)
+        kwds['decoded'] = str(parse_tree)
+        kwds['defects'].extend(parse_tree.all_defects)
+        if parse_tree.params is None:
+            kwds['params'] = {}
+        else:
+            # The MIME RFCs specify that parameter ordering is arbitrary.
+            kwds['params'] = dict((utils._sanitize(name).lower(),
+                                   utils._sanitize(value))
+                                  for name, value in parse_tree.params)
+
+    def init(self, *args, **kw):
+        self._params = kw.pop('params')
+        super().init(*args, **kw)
+
+    @property
+    def params(self):
+        return self._params.copy()
+
+
+class ContentTypeHeader(ParameterizedMIMEHeader):
+
+    value_parser = staticmethod(parser.parse_content_type_header)
+
+    def init(self, *args, **kw):
+        super().init(*args, **kw)
+        self._maintype = utils._sanitize(self._parse_tree.maintype)
+        self._subtype = utils._sanitize(self._parse_tree.subtype)
+
+    @property
+    def maintype(self):
+        return self._maintype
+
+    @property
+    def subtype(self):
+        return self._subtype
+
+    @property
+    def content_type(self):
+        return self.maintype + '/' + self.subtype
+
+
+class ContentDispositionHeader(ParameterizedMIMEHeader):
+
+    value_parser = staticmethod(parser.parse_content_disposition_header)
+
+    def init(self, *args, **kw):
+        super().init(*args, **kw)
+        cd = self._parse_tree.content_disposition
+        self._content_disposition = cd if cd is None else utils._sanitize(cd)
+
+    @property
+    def content_disposition(self):
+        return self._content_disposition
+
+
+class ContentTransferEncodingHeader(object):
+
+    max_count = 1
+
+    value_parser = staticmethod(parser.parse_content_transfer_encoding_header)
+
+    @classmethod
+    def parse(cls, value, kwds):
+        kwds['parse_tree'] = parse_tree = cls.value_parser(value)
+        kwds['decoded'] = str(parse_tree)
+        kwds['defects'].extend(parse_tree.all_defects)
+
+    def init(self, *args, **kw):
+        super().init(*args, **kw)
+        self._cte = utils._sanitize(self._parse_tree.cte)
+
+    @property
+    def cte(self):
+        return self._cte
+
+
+# The header factory #
+
+_default_header_map = {
+    'subject':                      UniqueUnstructuredHeader,
+    'date':                         UniqueDateHeader,
+    'resent-date':                  DateHeader,
+    'orig-date':                    UniqueDateHeader,
+    'sender':                       UniqueSingleAddressHeader,
+    'resent-sender':                SingleAddressHeader,
+    'to':                           UniqueAddressHeader,
+    'resent-to':                    AddressHeader,
+    'cc':                           UniqueAddressHeader,
+    'resent-cc':                    AddressHeader,
+    'bcc':                          UniqueAddressHeader,
+    'resent-bcc':                   AddressHeader,
+    'from':                         UniqueAddressHeader,
+    'resent-from':                  AddressHeader,
+    'reply-to':                     UniqueAddressHeader,
+    'mime-version':                 MIMEVersionHeader,
+    'content-type':                 ContentTypeHeader,
+    'content-disposition':          ContentDispositionHeader,
+    'content-transfer-encoding':    ContentTransferEncodingHeader,
+    }
+
+class HeaderRegistry(object):
+
+    """A header_factory and header registry."""
+
+    def __init__(self, base_class=BaseHeader, default_class=UnstructuredHeader,
+                       use_default_map=True):
+        """Create a header_factory that works with the Policy API.
+
+        base_class is the class that will be the last class in the created
+        header class's __bases__ list.  default_class is the class that will be
+        used if "name" (see __call__) does not appear in the registry.
+        use_default_map controls whether or not the default mapping of names to
+        specialized classes is copied in to the registry when the factory is
+        created.  The default is True.
+
+        """
+        self.registry = {}
+        self.base_class = base_class
+        self.default_class = default_class
+        if use_default_map:
+            self.registry.update(_default_header_map)
+
+    def map_to_type(self, name, cls):
+        """Register cls as the specialized class for handling "name" headers.
+
+        """
+        self.registry[name.lower()] = cls
+
+    def __getitem__(self, name):
+        cls = self.registry.get(name.lower(), self.default_class)
+        return type(text_to_native_str('_'+cls.__name__), (cls, self.base_class), {})
+
+    def __call__(self, name, value):
+        """Create a header instance for header 'name' from 'value'.
+
+        Creates a header instance by creating a specialized class for parsing
+        and representing the specified header by combining the factory
+        base_class with a specialized class from the registry or the
+        default_class, and passing the name and value to the constructed
+        class's constructor.
+
+        """
+        return self[name](name, value)

+ 74 - 0
Lib/site-packages/future/backports/email/iterators.py

@@ -0,0 +1,74 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Various types of useful iterators and generators."""
+from __future__ import print_function
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+__all__ = [
+    'body_line_iterator',
+    'typed_subpart_iterator',
+    'walk',
+    # Do not include _structure() since it's part of the debugging API.
+    ]
+
+import sys
+from io import StringIO
+
+
+# This function will become a method of the Message class
+def walk(self):
+    """Walk over the message tree, yielding each subpart.
+
+    The walk is performed in depth-first order.  This method is a
+    generator.
+    """
+    yield self
+    if self.is_multipart():
+        for subpart in self.get_payload():
+            for subsubpart in subpart.walk():
+                yield subsubpart
+
+
+# These two functions are imported into the Iterators.py interface module.
+def body_line_iterator(msg, decode=False):
+    """Iterate over the parts, returning string payloads line-by-line.
+
+    Optional decode (default False) is passed through to .get_payload().
+    """
+    for subpart in msg.walk():
+        payload = subpart.get_payload(decode=decode)
+        if isinstance(payload, str):
+            for line in StringIO(payload):
+                yield line
+
+
+def typed_subpart_iterator(msg, maintype='text', subtype=None):
+    """Iterate over the subparts with a given MIME type.
+
+    Use `maintype' as the main MIME type to match against; this defaults to
+    "text".  Optional `subtype' is the MIME subtype to match against; if
+    omitted, only the main type is matched.
+    """
+    for subpart in msg.walk():
+        if subpart.get_content_maintype() == maintype:
+            if subtype is None or subpart.get_content_subtype() == subtype:
+                yield subpart
+
+
+def _structure(msg, fp=None, level=0, include_default=False):
+    """A handy debugging aid"""
+    if fp is None:
+        fp = sys.stdout
+    tab = ' ' * (level * 4)
+    print(tab + msg.get_content_type(), end='', file=fp)
+    if include_default:
+        print(' [%s]' % msg.get_default_type(), file=fp)
+    else:
+        print(file=fp)
+    if msg.is_multipart():
+        for subpart in msg.get_payload():
+            _structure(subpart, fp, level+1, include_default)

+ 882 - 0
Lib/site-packages/future/backports/email/message.py

@@ -0,0 +1,882 @@
+# -*- coding: utf-8 -*-
+# Copyright (C) 2001-2007 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Basic message object for the email package object model."""
+from __future__ import absolute_import, division, unicode_literals
+from future.builtins import list, range, str, zip
+
+__all__ = ['Message']
+
+import re
+import uu
+import base64
+import binascii
+from io import BytesIO, StringIO
+
+# Intrapackage imports
+from future.utils import as_native_str
+from future.backports.email import utils
+from future.backports.email import errors
+from future.backports.email._policybase import compat32
+from future.backports.email import charset as _charset
+from future.backports.email._encoded_words import decode_b
+Charset = _charset.Charset
+
+SEMISPACE = '; '
+
+# Regular expression that matches `special' characters in parameters, the
+# existence of which force quoting of the parameter value.
+tspecials = re.compile(r'[ \(\)<>@,;:\\"/\[\]\?=]')
+
+
+def _splitparam(param):
+    # Split header parameters.  BAW: this may be too simple.  It isn't
+    # strictly RFC 2045 (section 5.1) compliant, but it catches most headers
+    # found in the wild.  We may eventually need a full fledged parser.
+    # RDM: we might have a Header here; for now just stringify it.
+    a, sep, b = str(param).partition(';')
+    if not sep:
+        return a.strip(), None
+    return a.strip(), b.strip()
+
def _formatparam(param, value=None, quote=True):
    """Convenience function to format and return a key=value pair.

    This will quote the value if needed or if quote is true.  If value is a
    three tuple (charset, language, value), it will be encoded according
    to RFC2231 rules.  If it contains non-ascii characters it will likewise
    be encoded according to RFC2231 rules, using the utf-8 charset and
    a null language.
    """
    if value is None or len(value) == 0:
        # A bare attribute with no value.
        return param
    # A tuple is used for RFC 2231 encoded parameter values where items
    # are (charset, language, value).  charset is a string, not a Charset
    # instance.  RFC 2231 encoded values are never quoted, per RFC.
    if isinstance(value, tuple):
        # Encode as per RFC 2231
        encoded = utils.encode_rfc2231(value[2], value[0], value[1])
        return '%s*=%s' % (param, encoded)
    try:
        value.encode('ascii')
    except UnicodeEncodeError:
        # Non-ASCII string value: RFC 2231 encode with utf-8 / null language.
        encoded = utils.encode_rfc2231(value, 'utf-8', '')
        return '%s*=%s' % (param, encoded)
    # BAW: Please check this.  I think that if quote is set it should
    # force quoting even if not necessary.
    if quote or tspecials.search(value):
        return '%s="%s"' % (param, utils.quote(value))
    return '%s=%s' % (param, value)
+
+def _parseparam(s):
+    # RDM This might be a Header, so for now stringify it.
+    s = ';' + str(s)
+    plist = []
+    while s[:1] == ';':
+        s = s[1:]
+        end = s.find(';')
+        while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2:
+            end = s.find(';', end + 1)
+        if end < 0:
+            end = len(s)
+        f = s[:end]
+        if '=' in f:
+            i = f.index('=')
+            f = f[:i].strip().lower() + '=' + f[i+1:].strip()
+        plist.append(f.strip())
+        s = s[end:]
+    return plist
+
+
def _unquotevalue(value):
    # This is different than utils.collapse_rfc2231_value() because it doesn't
    # try to convert the value to a unicode.  Message.get_param() and
    # Message.get_params() are both currently defined to return the tuple in
    # the face of RFC 2231 parameters.
    if not isinstance(value, tuple):
        return utils.unquote(value)
    charset, language, raw = value
    return charset, language, utils.unquote(raw)
+
+
+class Message(object):
+    """Basic message object.
+
+    A message object is defined as something that has a bunch of RFC 2822
+    headers and a payload.  It may optionally have an envelope header
+    (a.k.a. Unix-From or From_ header).  If the message is a container (i.e. a
+    multipart or a message/rfc822), then the payload is a list of Message
+    objects, otherwise it is a string.
+
+    Message objects implement part of the `mapping' interface, which assumes
+    there is exactly one occurrence of the header per message.  Some headers
+    do in fact appear multiple times (e.g. Received) and for those headers,
+    you must use the explicit API to set or get all the headers.  Not all of
+    the mapping methods are implemented.
+    """
    def __init__(self, policy=compat32):
        """Initialize an empty message.

        policy controls header parsing/storage behavior; it defaults to the
        backward-compatible compat32 policy.
        """
        self.policy = policy
        self._headers = list()  # flat list of (name, value) tuples, in order
        self._unixfrom = None  # Unix From_ envelope line, if any
        self._payload = None  # str for simple parts, list of Messages for multipart
        self._charset = None  # Charset instance, set via set_charset()
        # Defaults for multipart messages
        self.preamble = self.epilogue = None
        self.defects = []  # parsing defects recorded by a parser/policy
        # Default content type
        self._default_type = 'text/plain'
+
    @as_native_str(encoding='utf-8')
    def __str__(self):
        """Return the entire formatted message as a string.
        This includes the headers, body, and envelope header.
        """
        # Delegates to as_string() with its defaults (no Unix From_ line,
        # no extra header wrapping).
        return self.as_string()
+
+    def as_string(self, unixfrom=False, maxheaderlen=0):
+        """Return the entire formatted message as a (unicode) string.
+        Optional `unixfrom' when True, means include the Unix From_ envelope
+        header.
+
+        This is a convenience method and may not generate the message exactly
+        as you intend.  For more flexibility, use the flatten() method of a
+        Generator instance.
+        """
+        from future.backports.email.generator import Generator
+        fp = StringIO()
+        g = Generator(fp, mangle_from_=False, maxheaderlen=maxheaderlen)
+        g.flatten(self, unixfrom=unixfrom)
+        return fp.getvalue()
+
    def is_multipart(self):
        """Return True if the message consists of multiple parts.

        A multipart message is one whose payload is a list of sub-parts
        (as built by attach()); simple messages carry a scalar payload.
        """
        return isinstance(self._payload, list)
+
+    #
+    # Unix From_ line
+    #
    def set_unixfrom(self, unixfrom):
        """Set the Unix From_ envelope header line (a string, or None)."""
        self._unixfrom = unixfrom
+
    def get_unixfrom(self):
        """Return the Unix From_ envelope header line, or None if unset."""
        return self._unixfrom
+
+    #
+    # Payload manipulation.
+    #
+    def attach(self, payload):
+        """Add the given payload to the current payload.
+
+        The current payload will always be a list of objects after this method
+        is called.  If you want to set the payload to a scalar object, use
+        set_payload() instead.
+        """
+        if self._payload is None:
+            self._payload = [payload]
+        else:
+            self._payload.append(payload)
+
    def get_payload(self, i=None, decode=False):
        """Return a reference to the payload.

        The payload will either be a list object or a string.  If you mutate
        the list object, you modify the message's payload in place.  Optional
        i returns that index into the payload.

        Optional decode is a flag indicating whether the payload should be
        decoded or not, according to the Content-Transfer-Encoding header
        (default is False).

        When True and the message is not a multipart, the payload will be
        decoded if this header's value is `quoted-printable' or `base64'.  If
        some other encoding is used, or the header is missing, or if the
        payload has bogus data (i.e. bogus base64 or uuencoded data), the
        payload is returned as-is.

        If the message is a multipart and the decode flag is True, then None
        is returned.
        """
        # Here is the logic table for this code, based on the email5.0.0 code:
        #   i     decode  is_multipart  result
        # ------  ------  ------------  ------------------------------
        #  None   True    True          None
        #   i     True    True          None
        #  None   False   True          _payload (a list)
        #   i     False   True          _payload element i (a Message)
        #   i     False   False         error (not a list)
        #   i     True    False         error (not a list)
        #  None   False   False         _payload
        #  None   True    False         _payload decoded (bytes)
        # Note that Barry planned to factor out the 'decode' case, but that
        # isn't so easy now that we handle the 8 bit data, which needs to be
        # converted in both the decode and non-decode path.
        if self.is_multipart():
            if decode:
                return None
            if i is None:
                return self._payload
            else:
                return self._payload[i]
        # For backward compatibility, Use isinstance and this error message
        # instead of the more logical is_multipart test.
        if i is not None and not isinstance(self._payload, list):
            raise TypeError('Expected list, got %s' % type(self._payload))
        payload = self._payload
        # cte might be a Header, so for now stringify it.
        cte = str(self.get('content-transfer-encoding', '')).lower()
        # payload may be bytes here.
        if isinstance(payload, str):
            payload = str(payload)    # for Python-Future, so surrogateescape works
            if utils._has_surrogates(payload):
                # The parser stored raw 8-bit data as surrogate escapes;
                # recover the original bytes before decoding.
                bpayload = payload.encode('ascii', 'surrogateescape')
                if not decode:
                    try:
                        payload = bpayload.decode(self.get_param('charset', 'ascii'), 'replace')
                    except LookupError:
                        # Unknown charset name: fall back to ASCII with
                        # replacement characters.
                        payload = bpayload.decode('ascii', 'replace')
            elif decode:
                try:
                    bpayload = payload.encode('ascii')
                except UnicodeError:
                    # This won't happen for RFC compliant messages (messages
                    # containing only ASCII codepoints in the unicode input).
                    # If it does happen, turn the string into bytes in a way
                    # guaranteed not to fail.
                    bpayload = payload.encode('raw-unicode-escape')
        if not decode:
            # Non-decoding path: return the (possibly charset-recovered) str.
            return payload
        if cte == 'quoted-printable':
            return utils._qdecode(bpayload)
        elif cte == 'base64':
            # XXX: this is a bit of a hack; decode_b should probably be factored
            # out somewhere, but I haven't figured out where yet.
            value, defects = decode_b(b''.join(bpayload.splitlines()))
            for defect in defects:
                self.policy.handle_defect(self, defect)
            return value
        elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
            in_file = BytesIO(bpayload)
            out_file = BytesIO()
            try:
                uu.decode(in_file, out_file, quiet=True)
                return out_file.getvalue()
            except uu.Error:
                # Some decoding problem
                return bpayload
        # Unknown or absent CTE with decode=True: return raw bytes for a
        # string payload, otherwise whatever the payload already is.
        if isinstance(payload, str):
            return bpayload
        return payload
+
    def set_payload(self, payload, charset=None):
        """Set the payload to the given value.

        Optional charset sets the message's default character set.  See
        set_charset() for details.
        """
        self._payload = payload
        if charset is not None:
            # set_charset() may re-encode the payload and add MIME headers.
            self.set_charset(charset)
+
    def set_charset(self, charset):
        """Set the charset of the payload to a given character set.

        charset can be a Charset instance, a string naming a character set, or
        None.  If it is a string it will be converted to a Charset instance.
        If charset is None, the charset parameter will be removed from the
        Content-Type field.  Anything else will generate a TypeError.

        The message will be assumed to be of type text/* encoded with
        charset.input_charset.  It will be converted to charset.output_charset
        and encoded properly, if needed, when generating the plain text
        representation of the message.  MIME headers (MIME-Version,
        Content-Type, Content-Transfer-Encoding) will be added as needed.
        """
        if charset is None:
            # Clearing the charset: drop the parameter and the cached Charset.
            self.del_param('charset')
            self._charset = None
            return
        if not isinstance(charset, Charset):
            charset = Charset(charset)
        self._charset = charset
        if 'MIME-Version' not in self:
            self.add_header('MIME-Version', '1.0')
        if 'Content-Type' not in self:
            self.add_header('Content-Type', 'text/plain',
                            charset=charset.get_output_charset())
        else:
            self.set_param('charset', charset.get_output_charset())
        # NOTE(review): Charset comparison with a string — presumably
        # Charset.__eq__ compares canonical names; confirm in the charset
        # module.  When input and output charsets differ, re-encode now.
        if charset != charset.get_output_charset():
            self._payload = charset.body_encode(self._payload)
        if 'Content-Transfer-Encoding' not in self:
            cte = charset.get_body_encoding()
            # get_body_encoding() returns either a callable that sets the
            # header itself, or a plain string naming the encoding.
            try:
                cte(self)
            except TypeError:
                self._payload = charset.body_encode(self._payload)
                self.add_header('Content-Transfer-Encoding', cte)
+
    def get_charset(self):
        """Return the Charset instance associated with the message's payload.

        Returns None if set_charset() was never called (or was cleared).
        """
        return self._charset
+
+    #
+    # MAPPING INTERFACE (partial)
+    #
    def __len__(self):
        """Return the total number of headers, including duplicates."""
        # Headers are stored as a flat list of (name, value) tuples.
        return len(self._headers)
+
    def __getitem__(self, name):
        """Get a header value.

        Return None if the header is missing instead of raising an exception.

        Note that if the header appeared multiple times, exactly which
        occurrence gets returned is undefined.  Use get_all() to get all
        the values matching a header field name.
        """
        # Thin wrapper over get(); name matching is case-insensitive there.
        return self.get(name)
+
+    def __setitem__(self, name, val):
+        """Set the value of a header.
+
+        Note: this does not overwrite an existing header with the same field
+        name.  Use __delitem__() first to delete any existing headers.
+        """
+        max_count = self.policy.header_max_count(name)
+        if max_count:
+            lname = name.lower()
+            found = 0
+            for k, v in self._headers:
+                if k.lower() == lname:
+                    found += 1
+                    if found >= max_count:
+                        raise ValueError("There may be at most {} {} headers "
+                                         "in a message".format(max_count, name))
+        self._headers.append(self.policy.header_store_parse(name, val))
+
+    def __delitem__(self, name):
+        """Delete all occurrences of a header, if present.
+
+        Does not raise an exception if the header is missing.
+        """
+        name = name.lower()
+        newheaders = list()
+        for k, v in self._headers:
+            if k.lower() != name:
+                newheaders.append((k, v))
+        self._headers = newheaders
+
+    def __contains__(self, name):
+        return name.lower() in [k.lower() for k, v in self._headers]
+
+    def __iter__(self):
+        for field, value in self._headers:
+            yield field
+
+    def keys(self):
+        """Return a list of all the message's header field names.
+
+        These will be sorted in the order they appeared in the original
+        message, or were added to the message, and may contain duplicates.
+        Any fields deleted and re-inserted are always appended to the header
+        list.
+        """
+        return [k for k, v in self._headers]
+
+    def values(self):
+        """Return a list of all the message's header values.
+
+        These will be sorted in the order they appeared in the original
+        message, or were added to the message, and may contain duplicates.
+        Any fields deleted and re-inserted are always appended to the header
+        list.
+        """
+        return [self.policy.header_fetch_parse(k, v)
+                for k, v in self._headers]
+
+    def items(self):
+        """Get all the message's header fields and values.
+
+        These will be sorted in the order they appeared in the original
+        message, or were added to the message, and may contain duplicates.
+        Any fields deleted and re-inserted are always appended to the header
+        list.
+        """
+        return [(k, self.policy.header_fetch_parse(k, v))
+                for k, v in self._headers]
+
+    def get(self, name, failobj=None):
+        """Get a header value.
+
+        Like __getitem__() but return failobj instead of None when the field
+        is missing.
+        """
+        name = name.lower()
+        for k, v in self._headers:
+            if k.lower() == name:
+                return self.policy.header_fetch_parse(k, v)
+        return failobj
+
+    #
+    # "Internal" methods (public API, but only intended for use by a parser
+    # or generator, not normal application code.
+    #
+
    def set_raw(self, name, value):
        """Store name and value in the model without modification.

        This is an "internal" API, intended only for use by a parser.
        """
        # Bypasses the policy's header_store_parse hook on purpose.
        self._headers.append((name, value))
+
+    def raw_items(self):
+        """Return the (name, value) header pairs without modification.
+
+        This is an "internal" API, intended only for use by a generator.
+        """
+        return iter(self._headers.copy())
+
+    #
+    # Additional useful stuff
+    #
+
+    def get_all(self, name, failobj=None):
+        """Return a list of all the values for the named field.
+
+        These will be sorted in the order they appeared in the original
+        message, and may contain duplicates.  Any fields deleted and
+        re-inserted are always appended to the header list.
+
+        If no such fields exist, failobj is returned (defaults to None).
+        """
+        values = []
+        name = name.lower()
+        for k, v in self._headers:
+            if k.lower() == name:
+                values.append(self.policy.header_fetch_parse(k, v))
+        if not values:
+            return failobj
+        return values
+
+    def add_header(self, _name, _value, **_params):
+        """Extended header setting.
+
+        name is the header field to add.  keyword arguments can be used to set
+        additional parameters for the header field, with underscores converted
+        to dashes.  Normally the parameter will be added as key="value" unless
+        value is None, in which case only the key will be added.  If a
+        parameter value contains non-ASCII characters it can be specified as a
+        three-tuple of (charset, language, value), in which case it will be
+        encoded according to RFC2231 rules.  Otherwise it will be encoded using
+        the utf-8 charset and a language of ''.
+
+        Examples:
+
+        msg.add_header('content-disposition', 'attachment', filename='bud.gif')
+        msg.add_header('content-disposition', 'attachment',
+                       filename=('utf-8', '', 'Fußballer.ppt'))
+        msg.add_header('content-disposition', 'attachment',
+                       filename='Fußballer.ppt'))
+        """
+        parts = []
+        for k, v in _params.items():
+            if v is None:
+                parts.append(k.replace('_', '-'))
+            else:
+                parts.append(_formatparam(k.replace('_', '-'), v))
+        if _value is not None:
+            parts.insert(0, _value)
+        self[_name] = SEMISPACE.join(parts)
+
+    def replace_header(self, _name, _value):
+        """Replace a header.
+
+        Replace the first matching header found in the message, retaining
+        header order and case.  If no matching header was found, a KeyError is
+        raised.
+        """
+        _name = _name.lower()
+        for i, (k, v) in zip(range(len(self._headers)), self._headers):
+            if k.lower() == _name:
+                self._headers[i] = self.policy.header_store_parse(k, _value)
+                break
+        else:
+            raise KeyError(_name)
+
+    #
+    # Use these three methods instead of the three above.
+    #
+
+    def get_content_type(self):
+        """Return the message's content type.
+
+        The returned string is coerced to lower case of the form
+        `maintype/subtype'.  If there was no Content-Type header in the
+        message, the default type as given by get_default_type() will be
+        returned.  Since according to RFC 2045, messages always have a default
+        type this will always return a value.
+
+        RFC 2045 defines a message's default type to be text/plain unless it
+        appears inside a multipart/digest container, in which case it would be
+        message/rfc822.
+        """
+        missing = object()
+        value = self.get('content-type', missing)
+        if value is missing:
+            # This should have no parameters
+            return self.get_default_type()
+        ctype = _splitparam(value)[0].lower()
+        # RFC 2045, section 5.2 says if its invalid, use text/plain
+        if ctype.count('/') != 1:
+            return 'text/plain'
+        return ctype
+
+    def get_content_maintype(self):
+        """Return the message's main content type.
+
+        This is the `maintype' part of the string returned by
+        get_content_type().
+        """
+        ctype = self.get_content_type()
+        return ctype.split('/')[0]
+
+    def get_content_subtype(self):
+        """Returns the message's sub-content type.
+
+        This is the `subtype' part of the string returned by
+        get_content_type().
+        """
+        ctype = self.get_content_type()
+        return ctype.split('/')[1]
+
    def get_default_type(self):
        """Return the `default' content type.

        Most messages have a default content type of text/plain, except for
        messages that are subparts of multipart/digest containers.  Such
        subparts have a default content type of message/rfc822.
        """
        # Set by a parser via set_default_type(); initialized to 'text/plain'.
        return self._default_type
+
    def set_default_type(self, ctype):
        """Set the `default' content type.

        ctype should be either "text/plain" or "message/rfc822", although this
        is not enforced.  The default content type is not stored in the
        Content-Type header.
        """
        self._default_type = ctype
+
+    def _get_params_preserve(self, failobj, header):
+        # Like get_params() but preserves the quoting of values.  BAW:
+        # should this be part of the public interface?
+        missing = object()
+        value = self.get(header, missing)
+        if value is missing:
+            return failobj
+        params = []
+        for p in _parseparam(value):
+            try:
+                name, val = p.split('=', 1)
+                name = name.strip()
+                val = val.strip()
+            except ValueError:
+                # Must have been a bare attribute
+                name = p.strip()
+                val = ''
+            params.append((name, val))
+        params = utils.decode_params(params)
+        return params
+
+    def get_params(self, failobj=None, header='content-type', unquote=True):
+        """Return the message's Content-Type parameters, as a list.
+
+        The elements of the returned list are 2-tuples of key/value pairs, as
+        split on the `=' sign.  The left hand side of the `=' is the key,
+        while the right hand side is the value.  If there is no `=' sign in
+        the parameter the value is the empty string.  The value is as
+        described in the get_param() method.
+
+        Optional failobj is the object to return if there is no Content-Type
+        header.  Optional header is the header to search instead of
+        Content-Type.  If unquote is True, the value is unquoted.
+        """
+        missing = object()
+        params = self._get_params_preserve(missing, header)
+        if params is missing:
+            return failobj
+        if unquote:
+            return [(k, _unquotevalue(v)) for k, v in params]
+        else:
+            return params
+
    def get_param(self, param, failobj=None, header='content-type',
                  unquote=True):
        """Return the parameter value if found in the Content-Type header.

        Optional failobj is the object to return if there is no Content-Type
        header, or the Content-Type header has no such parameter.  Optional
        header is the header to search instead of Content-Type.

        Parameter keys are always compared case insensitively.  The return
        value can either be a string, or a 3-tuple if the parameter was RFC
        2231 encoded.  When it's a 3-tuple, the elements of the value are of
        the form (CHARSET, LANGUAGE, VALUE).  Note that both CHARSET and
        LANGUAGE can be None, in which case you should consider VALUE to be
        encoded in the us-ascii charset.  You can usually ignore LANGUAGE.
        The parameter value (either the returned string, or the VALUE item in
        the 3-tuple) is always unquoted, unless unquote is set to False.

        If your application doesn't care whether the parameter was RFC 2231
        encoded, it can turn the return value into a string as follows:

            rawparam = msg.get_param('foo')
            param = email.utils.collapse_rfc2231_value(rawparam)

        """
        if header not in self:
            return failobj
        # Linear scan over the preserved-quoting parameter list; first
        # case-insensitive key match wins.
        for k, v in self._get_params_preserve(failobj, header):
            if k.lower() == param.lower():
                if unquote:
                    return _unquotevalue(v)
                else:
                    return v
        return failobj
+
    def set_param(self, param, value, header='Content-Type', requote=True,
                  charset=None, language=''):
        """Set a parameter in the Content-Type header.

        If the parameter already exists in the header, its value will be
        replaced with the new value.

        If header is Content-Type and has not yet been defined for this
        message, it will be set to "text/plain" and the new parameter and
        value will be appended as per RFC 2045.

        An alternate header can specified in the header argument, and all
        parameters will be quoted as necessary unless requote is False.

        If charset is specified, the parameter will be encoded according to RFC
        2231.  Optional language specifies the RFC 2231 language, defaulting
        to the empty string.  Both charset and language should be strings.
        """
        # An explicit charset switches to the RFC 2231 3-tuple form.
        if not isinstance(value, tuple) and charset:
            value = (charset, language, value)

        if header not in self and header.lower() == 'content-type':
            # Missing Content-Type gets the RFC 2045 default base type.
            ctype = 'text/plain'
        else:
            ctype = self.get(header)
        if not self.get_param(param, header=header):
            # Parameter not present yet: append it to the header value.
            if not ctype:
                ctype = _formatparam(param, value, requote)
            else:
                ctype = SEMISPACE.join(
                    [ctype, _formatparam(param, value, requote)])
        else:
            # Parameter exists: rebuild the header, substituting the new
            # value in place while keeping parameter order.
            ctype = ''
            for old_param, old_value in self.get_params(header=header,
                                                        unquote=requote):
                append_param = ''
                if old_param.lower() == param.lower():
                    append_param = _formatparam(param, value, requote)
                else:
                    append_param = _formatparam(old_param, old_value, requote)
                if not ctype:
                    ctype = append_param
                else:
                    ctype = SEMISPACE.join([ctype, append_param])
        # Only rewrite the header when the value actually changed, to keep
        # header order stable.
        if ctype != self.get(header):
            del self[header]
            self[header] = ctype
+
+    def del_param(self, param, header='content-type', requote=True):
+        """Remove the given parameter completely from the Content-Type header.
+
+        The header will be re-written in place without the parameter or its
+        value. All values will be quoted as necessary unless requote is
+        False.  Optional header specifies an alternative to the Content-Type
+        header.
+        """
+        if header not in self:
+            return
+        new_ctype = ''
+        for p, v in self.get_params(header=header, unquote=requote):
+            if p.lower() != param.lower():
+                if not new_ctype:
+                    new_ctype = _formatparam(p, v, requote)
+                else:
+                    new_ctype = SEMISPACE.join([new_ctype,
+                                                _formatparam(p, v, requote)])
+        if new_ctype != self.get(header):
+            del self[header]
+            self[header] = new_ctype
+
+    def set_type(self, type, header='Content-Type', requote=True):
+        """Set the main type and subtype for the Content-Type header.
+
+        type must be a string in the form "maintype/subtype", otherwise a
+        ValueError is raised.
+
+        This method replaces the Content-Type header, keeping all the
+        parameters in place.  If requote is False, this leaves the existing
+        header's quoting as is.  Otherwise, the parameters will be quoted (the
+        default).
+
+        An alternative header can be specified in the header argument.  When
+        the Content-Type header is set, we'll always also add a MIME-Version
+        header.
+        """
+        # BAW: should we be strict?
+        if not type.count('/') == 1:
+            raise ValueError
+        # Set the Content-Type, you get a MIME-Version
+        if header.lower() == 'content-type':
+            del self['mime-version']
+            self['MIME-Version'] = '1.0'
+        if header not in self:
+            self[header] = type
+            return
+        params = self.get_params(header=header, unquote=requote)
+        del self[header]
+        self[header] = type
+        # Skip the first param; it's the old type.
+        for p, v in params[1:]:
+            self.set_param(p, v, header, requote)
+
+    def get_filename(self, failobj=None):
+        """Return the filename associated with the payload if present.
+
+        The filename is extracted from the Content-Disposition header's
+        `filename' parameter, and it is unquoted.  If that header is missing
+        the `filename' parameter, this method falls back to looking for the
+        `name' parameter.
+        """
+        missing = object()
+        filename = self.get_param('filename', missing, 'content-disposition')
+        if filename is missing:
+            filename = self.get_param('name', missing, 'content-type')
+        if filename is missing:
+            return failobj
+        return utils.collapse_rfc2231_value(filename).strip()
+
+    def get_boundary(self, failobj=None):
+        """Return the boundary associated with the payload if present.
+
+        The boundary is extracted from the Content-Type header's `boundary'
+        parameter, and it is unquoted.
+        """
+        missing = object()
+        boundary = self.get_param('boundary', missing)
+        if boundary is missing:
+            return failobj
+        # RFC 2046 says that boundaries may begin but not end in w/s
+        return utils.collapse_rfc2231_value(boundary).rstrip()
+
    def set_boundary(self, boundary):
        """Set the boundary parameter in Content-Type to 'boundary'.

        This is subtly different than deleting the Content-Type header and
        adding a new one with a new boundary parameter via add_header().  The
        main difference is that using the set_boundary() method preserves the
        order of the Content-Type header in the original message.

        HeaderParseError is raised if the message has no Content-Type header.
        """
        # Sentinel object: distinguishes "no Content-Type header at all"
        # from a header whose parameter list is empty.
        missing = object()
        # _get_params_preserve keeps the original case and quoting of each
        # parameter so the rebuilt header stays as close as possible to the
        # source text.
        params = self._get_params_preserve(missing, 'content-type')
        if params is missing:
            # There was no Content-Type header, and we don't know what type
            # to set it to, so raise an exception.
            raise errors.HeaderParseError('No Content-Type header found')
        newparams = list()
        foundp = False
        # Rebuild the parameter list, replacing any existing boundary
        # (case-insensitively) with the new, always-quoted value.
        for pk, pv in params:
            if pk.lower() == 'boundary':
                newparams.append(('boundary', '"%s"' % boundary))
                foundp = True
            else:
                newparams.append((pk, pv))
        if not foundp:
            # The original Content-Type header had no boundary attribute.
            # Tack one on the end.  BAW: should we raise an exception
            # instead???
            newparams.append(('boundary', '"%s"' % boundary))
        # Replace the existing Content-Type header with the new value
        newheaders = list()
        for h, v in self._headers:
            if h.lower() == 'content-type':
                parts = list()
                # NOTE(review): this inner loop reuses the name 'v', shadowing
                # the outer loop variable; harmless here because 'v' is
                # re-bound on every outer iteration, but worth keeping in mind.
                for k, v in newparams:
                    if v == '':
                        # A bare parameter (no value) is emitted as just its
                        # name, without '='.
                        parts.append(k)
                    else:
                        parts.append('%s=%s' % (k, v))
                val = SEMISPACE.join(parts)
                # Route the rebuilt value through the policy so custom
                # policies can parse/validate the stored header.
                newheaders.append(self.policy.header_store_parse(h, val))

            else:
                newheaders.append((h, v))
        self._headers = newheaders
+
+    def get_content_charset(self, failobj=None):
+        """Return the charset parameter of the Content-Type header.
+
+        The returned string is always coerced to lower case.  If there is no
+        Content-Type header, or if that header has no charset parameter,
+        failobj is returned.
+        """
+        missing = object()
+        charset = self.get_param('charset', missing)
+        if charset is missing:
+            return failobj
+        if isinstance(charset, tuple):
+            # RFC 2231 encoded, so decode it, and it better end up as ascii.
+            pcharset = charset[0] or 'us-ascii'
+            try:
+                # LookupError will be raised if the charset isn't known to
+                # Python.  UnicodeError will be raised if the encoded text
+                # contains a character not in the charset.
+                as_bytes = charset[2].encode('raw-unicode-escape')
+                charset = str(as_bytes, pcharset)
+            except (LookupError, UnicodeError):
+                charset = charset[2]
+        # charset characters must be in us-ascii range
+        try:
+            charset.encode('us-ascii')
+        except UnicodeError:
+            return failobj
+        # RFC 2046, $4.1.2 says charsets are not case sensitive
+        return charset.lower()
+
+    def get_charsets(self, failobj=None):
+        """Return a list containing the charset(s) used in this message.
+
+        The returned list of items describes the Content-Type headers'
+        charset parameter for this message and all the subparts in its
+        payload.
+
+        Each item will either be a string (the value of the charset parameter
+        in the Content-Type header of that part) or the value of the
+        'failobj' parameter (defaults to None), if the part does not have a
+        main MIME type of "text", or the charset is not defined.
+
+        The list will contain one string for each part of the message, plus
+        one for the container message (i.e. self), so that a non-multipart
+        message will still return a list of length 1.
+        """
+        return [part.get_content_charset(failobj) for part in self.walk()]
+
+    # I.e. def walk(self): ...
+    from future.backports.email.iterators import walk

+ 0 - 0
Lib/site-packages/future/backports/email/mime/__init__.py


+ 39 - 0
Lib/site-packages/future/backports/email/mime/application.py

@@ -0,0 +1,39 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Keith Dart
+# Contact: email-sig@python.org
+
+"""Class representing application/* type MIME documents."""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+from future.backports.email import encoders
+from future.backports.email.mime.nonmultipart import MIMENonMultipart
+
+__all__ = ["MIMEApplication"]
+
+
class MIMEApplication(MIMENonMultipart):
    """Class for generating application/* MIME documents."""

    def __init__(self, _data, _subtype='octet-stream',
                 _encoder=encoders.encode_base64, **_params):
        """Create an application/* type MIME document.

        _data -- string with the raw application data.
        _subtype -- MIME content type subtype; defaults to 'octet-stream'.
        _encoder -- callable that encodes the payload for transport
            (default: base64 encoding).

        Remaining keyword arguments are forwarded to the base class and
        become parameters on the Content-Type header.
        """
        # Explicitly passing None is an error; the default above is the
        # only way to get 'octet-stream'.
        if _subtype is None:
            raise TypeError('Invalid application MIME subtype')
        MIMENonMultipart.__init__(self, 'application', _subtype, **_params)
        self.set_payload(_data)
        _encoder(self)

+ 74 - 0
Lib/site-packages/future/backports/email/mime/audio.py

@@ -0,0 +1,74 @@
+# Copyright (C) 2001-2007 Python Software Foundation
+# Author: Anthony Baxter
+# Contact: email-sig@python.org
+
+"""Class representing audio/* type MIME documents."""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+__all__ = ['MIMEAudio']
+
+import sndhdr
+
+from io import BytesIO
+from future.backports.email import encoders
+from future.backports.email.mime.nonmultipart import MIMENonMultipart
+
+
+_sndhdr_MIMEmap = {'au'  : 'basic',
+                   'wav' :'x-wav',
+                   'aiff':'x-aiff',
+                   'aifc':'x-aiff',
+                   }
+
+# There are others in sndhdr that don't have MIME types. :(
+# Additional ones to be added to sndhdr? midi, mp3, realaudio, wma??
+def _whatsnd(data):
+    """Try to identify a sound file type.
+
+    sndhdr.what() has a pretty cruddy interface, unfortunately.  This is why
+    we re-do it here.  It would be easier to reverse engineer the Unix 'file'
+    command and use the standard 'magic' file, as shipped with a modern Unix.
+    """
+    hdr = data[:512]
+    fakefile = BytesIO(hdr)
+    for testfn in sndhdr.tests:
+        res = testfn(hdr, fakefile)
+        if res is not None:
+            return _sndhdr_MIMEmap.get(res[0])
+    return None
+
+
class MIMEAudio(MIMENonMultipart):
    """Class for generating audio/* MIME documents."""

    def __init__(self, _audiodata, _subtype=None,
                 _encoder=encoders.encode_base64, **_params):
        """Create an audio/* type MIME document.

        _audiodata -- string with the raw audio data.  When no _subtype is
            given the data is sniffed with the standard `sndhdr' module; a
            TypeError is raised if no subtype can be determined that way.
        _encoder -- callable taking this instance as its only argument; it
            must use get_payload()/set_payload() to replace the payload
            with the encoded form and add any Content-Transfer-Encoding or
            other headers needed.  Defaults to base64 encoding.

        Remaining keyword arguments are forwarded to the base class and
        become parameters on the Content-Type header.
        """
        if _subtype is None:
            # No explicit subtype: try to sniff one from the data.
            _subtype = _whatsnd(_audiodata)
            if _subtype is None:
                raise TypeError('Could not find audio MIME subtype')
        MIMENonMultipart.__init__(self, 'audio', _subtype, **_params)
        self.set_payload(_audiodata)
        _encoder(self)

+ 25 - 0
Lib/site-packages/future/backports/email/mime/base.py

@@ -0,0 +1,25 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Base class for MIME specializations."""
+from __future__ import absolute_import, division, unicode_literals
+from future.backports.email import message
+
+__all__ = ['MIMEBase']
+
+
class MIMEBase(message.Message):
    """Base class for MIME specializations."""

    def __init__(self, _maintype, _subtype, **_params):
        """This constructor adds a Content-Type: and a MIME-Version: header.

        The Content-Type is built as "_maintype/_subtype"; any keyword
        arguments become additional parameters on that header.
        """
        message.Message.__init__(self)
        self.add_header('Content-Type', '%s/%s' % (_maintype, _subtype),
                        **_params)
        self['MIME-Version'] = '1.0'

+ 48 - 0
Lib/site-packages/future/backports/email/mime/image.py

@@ -0,0 +1,48 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Class representing image/* type MIME documents."""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+__all__ = ['MIMEImage']
+
+import imghdr
+
+from future.backports.email import encoders
+from future.backports.email.mime.nonmultipart import MIMENonMultipart
+
+
class MIMEImage(MIMENonMultipart):
    """Class for generating image/* type MIME documents."""

    def __init__(self, _imagedata, _subtype=None,
                 _encoder=encoders.encode_base64, **_params):
        """Create an image/* type MIME document.

        _imagedata -- string with the raw image data.  When no _subtype is
            given the data is sniffed with the standard `imghdr' module; a
            TypeError is raised if no subtype can be guessed that way.
        _encoder -- callable taking this instance as its only argument; it
            must use get_payload()/set_payload() to replace the payload
            with the encoded form and add any Content-Transfer-Encoding or
            other headers needed.  Defaults to base64 encoding.

        Remaining keyword arguments are forwarded to the base class and
        become parameters on the Content-Type header.
        """
        if _subtype is None:
            # No explicit subtype: try to sniff one from the data.
            _subtype = imghdr.what(None, _imagedata)
            if _subtype is None:
                raise TypeError('Could not guess image MIME subtype')
        MIMENonMultipart.__init__(self, 'image', _subtype, **_params)
        self.set_payload(_imagedata)
        _encoder(self)

+ 36 - 0
Lib/site-packages/future/backports/email/mime/message.py

@@ -0,0 +1,36 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Class representing message/* MIME documents."""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+__all__ = ['MIMEMessage']
+
+from future.backports.email import message
+from future.backports.email.mime.nonmultipart import MIMENonMultipart
+
+
class MIMEMessage(MIMENonMultipart):
    """Class representing message/* MIME documents."""

    def __init__(self, _msg, _subtype='rfc822'):
        """Create a message/* type MIME document.

        _msg -- the contained message; must be an instance of Message (or a
            subclass), otherwise TypeError is raised.
        _subtype -- subtype of the contained message, defaulting to
            "rfc822" (the MIME-standard name, even though RFC 2822 has
            since superseded RFC 822).
        """
        MIMENonMultipart.__init__(self, 'message', _subtype)
        if not isinstance(_msg, message.Message):
            raise TypeError('Argument is not an instance of Message')
        # Call the base Message.attach directly: our own attach() (from
        # MIMENonMultipart) deliberately raises for non-multipart types.
        message.Message.attach(self, _msg)
        # A message/* container defaults to message/rfc822 content.
        self.set_default_type('message/rfc822')

+ 49 - 0
Lib/site-packages/future/backports/email/mime/multipart.py

@@ -0,0 +1,49 @@
+# Copyright (C) 2002-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Base class for MIME multipart/* type messages."""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+__all__ = ['MIMEMultipart']
+
+from future.backports.email.mime.base import MIMEBase
+
+
class MIMEMultipart(MIMEBase):
    """Base class for MIME multipart/* type messages."""

    def __init__(self, _subtype='mixed', boundary=None, _subparts=None,
                 **_params):
        """Creates a multipart/* type message.

        _subtype -- subtype of the multipart content type, defaulting to
            `mixed', so by default a multipart/mixed message (with proper
            Content-Type and MIME-Version headers) is produced.
        boundary -- the multipart boundary string; calculated lazily when
            not supplied here.
        _subparts -- an iterable of initial subparts for the payload; more
            can always be added later via attach().

        Remaining keyword arguments become additional parameters on the
        Content-Type header.
        """
        MIMEBase.__init__(self, 'multipart', _subtype, **_params)

        # Message.is_multipart() assumes a multipart payload is a list, so
        # start with an empty one rather than the superclass default.
        self._payload = []

        for part in (_subparts or ()):
            self.attach(part)
        if boundary:
            self.set_boundary(boundary)

+ 24 - 0
Lib/site-packages/future/backports/email/mime/nonmultipart.py

@@ -0,0 +1,24 @@
+# Copyright (C) 2002-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Base class for MIME type messages that are not multipart."""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+__all__ = ['MIMENonMultipart']
+
+from future.backports.email import errors
+from future.backports.email.mime.base import MIMEBase
+
+
class MIMENonMultipart(MIMEBase):
    """Base class for MIME non-multipart type messages."""

    def attach(self, payload):
        """Refuse to attach a subpart.

        The public API prohibits attaching subparts to MIMEBase-derived
        non-multipart types, since none of them are, by definition, of
        content type multipart/*.
        """
        raise errors.MultipartConversionError(
            'Cannot attach additional subparts to non-multipart/*')

+ 44 - 0
Lib/site-packages/future/backports/email/mime/text.py

@@ -0,0 +1,44 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Class representing text/* type MIME documents."""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+__all__ = ['MIMEText']
+
+from future.backports.email.encoders import encode_7or8bit
+from future.backports.email.mime.nonmultipart import MIMENonMultipart
+
+
class MIMEText(MIMENonMultipart):
    """Class for generating text/* type MIME documents."""

    def __init__(self, _text, _subtype='plain', _charset=None):
        """Create a text/* type MIME document.

        _text -- the string payload for this message object.
        _subtype -- the MIME sub content type, defaulting to "plain".
        _charset -- charset parameter for the Content-Type header.  When
            omitted, "us-ascii" is used if the text is pure ASCII and
            "utf-8" otherwise.  As a side effect the
            Content-Transfer-Encoding header is also set.
        """

        # Pick a charset by probing the text when none was specified.
        # XXX: This can be removed once #7304 is fixed.
        if _charset is None:
            try:
                _text.encode('us-ascii')
            except UnicodeEncodeError:
                _charset = 'utf-8'
            else:
                _charset = 'us-ascii'

        MIMENonMultipart.__init__(self, 'text', _subtype, charset=_charset)

        self.set_payload(_text, _charset)

+ 135 - 0
Lib/site-packages/future/backports/email/parser.py

@@ -0,0 +1,135 @@
+# Copyright (C) 2001-2007 Python Software Foundation
+# Author: Barry Warsaw, Thomas Wouters, Anthony Baxter
+# Contact: email-sig@python.org
+
+"""A parser of RFC 2822 and MIME email messages."""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+
+__all__ = ['Parser', 'HeaderParser', 'BytesParser', 'BytesHeaderParser']
+
+import warnings
+from io import StringIO, TextIOWrapper
+
+from future.backports.email.feedparser import FeedParser, BytesFeedParser
+from future.backports.email.message import Message
+from future.backports.email._policybase import compat32
+
+
class Parser(object):
    def __init__(self, _class=Message, **_3to2kwargs):
        """Parser of RFC 2822 and MIME email messages.

        Creates an in-memory object tree representing the email message,
        which can then be manipulated and turned over to a Generator to
        return the textual representation of the message.

        The string must be formatted as a block of RFC 2822 headers and
        header continuation lines, optionally preceded by a `Unix-from'
        header.  The header block is terminated either by the end of the
        string or by a blank line.

        _class -- the class to instantiate for new message objects; it must
            have a zero-argument constructor.  Defaults to Message.

        The 'policy' keyword specifies a policy object controlling various
        aspects of the parser's operation; the default policy maintains
        backward compatibility.
        """
        # 'policy' is keyword-only on Python 3; this emulates that for the
        # backport.  Any other unexpected keywords are silently ignored,
        # matching the original behavior.
        self.policy = _3to2kwargs.pop('policy', compat32)
        self._class = _class

    def parse(self, fp, headersonly=False):
        """Create a message structure from the data in a file.

        Reads all the data from *fp* and returns the root of the resulting
        message structure.  When *headersonly* is true, parsing stops after
        the header block instead of consuming the whole file.
        """
        feedparser = FeedParser(self._class, policy=self.policy)
        if headersonly:
            feedparser._set_headersonly()
        # Feed the file through the incremental parser in 8K chunks.
        while True:
            chunk = fp.read(8192)
            if not chunk:
                break
            feedparser.feed(chunk)
        return feedparser.close()

    def parsestr(self, text, headersonly=False):
        """Create a message structure from a string.

        Returns the root of the message structure.  When *headersonly* is
        true, parsing stops after the header block instead of consuming the
        entire string.
        """
        return self.parse(StringIO(text), headersonly=headersonly)
+
+
+
class HeaderParser(Parser):
    """A Parser that always stops after the message headers."""

    def parse(self, fp, headersonly=True):
        # Force header-only parsing regardless of the flag's value.
        return Parser.parse(self, fp, True)

    def parsestr(self, text, headersonly=True):
        # Force header-only parsing regardless of the flag's value.
        return Parser.parsestr(self, text, True)
+
+
class BytesParser(object):

    def __init__(self, *args, **kw):
        """Parser of binary RFC 2822 and MIME email messages.

        Creates an in-memory object tree representing the email message,
        which can then be manipulated and turned over to a Generator to
        return the textual representation of the message.

        The input must be formatted as a block of RFC 2822 headers and
        header continuation lines, optionally preceded by a `Unix-from'
        header.  The header block is terminated either by the end of the
        input or by a blank line.

        All arguments are forwarded to the underlying text-mode Parser;
        see its docstring for details (_class, policy).
        """
        # Delegate all real parsing work to a text-mode Parser.
        self.parser = Parser(*args, **kw)

    def parse(self, fp, headersonly=False):
        """Create a message structure from the data in a binary file.

        Reads all the data from *fp* and returns the root of the resulting
        message structure.  When *headersonly* is true, parsing stops after
        the header block instead of consuming the whole file.
        """
        # Decode as ASCII with surrogateescape so that arbitrary bytes
        # survive the round trip through the text parser.
        fp = TextIOWrapper(fp, encoding='ascii', errors='surrogateescape')
        with fp:
            return self.parser.parse(fp, headersonly)


    def parsebytes(self, text, headersonly=False):
        """Create a message structure from a byte string.

        Returns the root of the message structure.  When *headersonly* is
        true, parsing stops after the header block instead of consuming the
        entire input.
        """
        text = text.decode('ASCII', errors='surrogateescape')
        return self.parser.parsestr(text, headersonly)
+
+
class BytesHeaderParser(BytesParser):
    """A BytesParser that always stops after the message headers."""

    def parse(self, fp, headersonly=True):
        # Force header-only parsing regardless of the flag's value.
        return BytesParser.parse(self, fp, headersonly=True)

    def parsebytes(self, text, headersonly=True):
        # Force header-only parsing regardless of the flag's value.
        return BytesParser.parsebytes(self, text, headersonly=True)

+ 193 - 0
Lib/site-packages/future/backports/email/policy.py

@@ -0,0 +1,193 @@
+"""This will be the home for the policy that hooks in the new
+code that adds all the email6 features.
+"""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+from future.builtins import super
+
+from future.standard_library.email._policybase import (Policy, Compat32,
+                                                  compat32, _extend_docstrings)
+from future.standard_library.email.utils import _has_surrogates
+from future.standard_library.email.headerregistry import HeaderRegistry as HeaderRegistry
+
+__all__ = [
+    'Compat32',
+    'compat32',
+    'Policy',
+    'EmailPolicy',
+    'default',
+    'strict',
+    'SMTP',
+    'HTTP',
+    ]
+
# NOTE: docstrings below that begin with "+" are completed at class-creation
# time by the _extend_docstrings decorator, which prepends the corresponding
# base-class docstring to the text after the marker.
@_extend_docstrings
class EmailPolicy(Policy):

    """+
    PROVISIONAL

    The API extensions enabled by this policy are currently provisional.
    Refer to the documentation for details.

    This policy adds new header parsing and folding algorithms.  Instead of
    simple strings, headers are custom objects with custom attributes
    depending on the type of the field.  The folding algorithm fully
    implements RFCs 2047 and 5322.

    In addition to the settable attributes listed above that apply to
    all Policies, this policy adds the following additional attributes:

    refold_source       -- if the value for a header in the Message object
                           came from the parsing of some source, this attribute
                           indicates whether or not a generator should refold
                           that value when transforming the message back into
                           stream form.  The possible values are:

                           none  -- all source values use original folding
                           long  -- source values that have any line that is
                                    longer than max_line_length will be
                                    refolded
                           all  -- all values are refolded.

                           The default is 'long'.

    header_factory      -- a callable that takes two arguments, 'name' and
                           'value', where 'name' is a header field name and
                           'value' is an unfolded header field value, and
                           returns a string-like object that represents that
                           header.  A default header_factory is provided that
                           understands some of the RFC5322 header field types.
                           (Currently address fields and date fields have
                           special treatment, while all other fields are
                           treated as unstructured.  This list will be
                           completed before the extension is marked stable.)
    """

    refold_source = 'long'
    # Class-level default factory; __init__ replaces it with a per-instance
    # one unless the caller supplied an explicit header_factory.
    header_factory = HeaderRegistry()

    def __init__(self, **kw):
        # Ensure that each new instance gets a unique header factory
        # (as opposed to clones, which share the factory).
        if 'header_factory' not in kw:
            # object.__setattr__ is used to bypass the Policy base class's
            # restricted attribute assignment.
            object.__setattr__(self, 'header_factory', HeaderRegistry())
        super().__init__(**kw)

    def header_max_count(self, name):
        """+
        The implementation for this class returns the max_count attribute from
        the specialized header class that would be used to construct a header
        of type 'name'.
        """
        return self.header_factory[name].max_count

    # The logic of the next three methods is chosen such that it is possible to
    # switch a Message object between a Compat32 policy and a policy derived
    # from this class and have the results stay consistent.  This allows a
    # Message object constructed with this policy to be passed to a library
    # that only handles Compat32 objects, or to receive such an object and
    # convert it to use the newer style by just changing its policy.  It is
    # also chosen because it postpones the relatively expensive full rfc5322
    # parse until as late as possible when parsing from source, since in many
    # applications only a few headers will actually be inspected.

    def header_source_parse(self, sourcelines):
        """+
        The name is parsed as everything up to the ':' and returned unmodified.
        The value is determined by stripping leading whitespace off the
        remainder of the first line, joining all subsequent lines together, and
        stripping any trailing carriage return or linefeed characters.  (This
        is the same as Compat32).

        """
        name, value = sourcelines[0].split(':', 1)
        # Continuation lines are joined as-is; only the first line's leading
        # whitespace and the final CR/LF are stripped.
        value = value.lstrip(' \t') + ''.join(sourcelines[1:])
        return (name, value.rstrip('\r\n'))

    def header_store_parse(self, name, value):
        """+
        The name is returned unchanged.  If the input value has a 'name'
        attribute and it matches the name ignoring case, the value is returned
        unchanged.  Otherwise the name and value are passed to header_factory
        method, and the resulting custom header object is returned as the
        value.  In this case a ValueError is raised if the input value contains
        CR or LF characters.

        """
        if hasattr(value, 'name') and value.name.lower() == name.lower():
            return (name, value)
        # More than one element from splitlines() means the raw string
        # contains an embedded CR or LF (potential header injection).
        if isinstance(value, str) and len(value.splitlines())>1:
            raise ValueError("Header values may not contain linefeed "
                             "or carriage return characters")
        return (name, self.header_factory(name, value))

    def header_fetch_parse(self, name, value):
        """+
        If the value has a 'name' attribute, it is returned to unmodified.
        Otherwise the name and the value with any linesep characters removed
        are passed to the header_factory method, and the resulting custom
        header object is returned.  Any surrogateescaped bytes get turned
        into the unicode unknown-character glyph.

        """
        if hasattr(value, 'name'):
            return value
        return self.header_factory(name, ''.join(value.splitlines()))

    def fold(self, name, value):
        """+
        Header folding is controlled by the refold_source policy setting.  A
        value is considered to be a 'source value' if and only if it does not
        have a 'name' attribute (having a 'name' attribute means it is a header
        object of some sort).  If a source value needs to be refolded according
        to the policy, it is converted into a custom header object by passing
        the name and the value with any linesep characters removed to the
        header_factory method.  Folding of a custom header object is done by
        calling its fold method with the current policy.

        Source values are split into lines using splitlines.  If the value is
        not to be refolded, the lines are rejoined using the linesep from the
        policy and returned.  The exception is lines containing non-ascii
        binary data.  In that case the value is refolded regardless of the
        refold_source setting, which causes the binary data to be CTE encoded
        using the unknown-8bit charset.

        """
        return self._fold(name, value, refold_binary=True)

    def fold_binary(self, name, value):
        """+
        The same as fold if cte_type is 7bit, except that the returned value is
        bytes.

        If cte_type is 8bit, non-ASCII binary data is converted back into
        bytes.  Headers with binary data are not refolded, regardless of the
        refold_header setting, since there is no way to know whether the binary
        data consists of single byte characters or multibyte characters.

        """
        folded = self._fold(name, value, refold_binary=self.cte_type=='7bit')
        return folded.encode('ascii', 'surrogateescape')

    def _fold(self, name, value, refold_binary=False):
        # A 'name' attribute marks a custom header object that knows how to
        # fold itself.
        if hasattr(value, 'name'):
            return value.fold(policy=self)
        # A falsy max_line_length (None/0) means "no limit".
        maxlen = self.max_line_length if self.max_line_length else float('inf')
        lines = value.splitlines()
        # Refold when policy is 'all', or is 'long' and any line (counting
        # "name: " on the first) exceeds max_line_length.
        refold = (self.refold_source == 'all' or
                  self.refold_source == 'long' and
                    (lines and len(lines[0])+len(name)+2 > maxlen or
                     any(len(x) > maxlen for x in lines[1:])))
        if refold or refold_binary and _has_surrogates(value):
            return self.header_factory(name, ''.join(lines)).fold(policy=self)
        return name + ': ' + self.linesep.join(lines) + self.linesep
+
+
# The module's stock policy instances.
default = EmailPolicy()
# Make the default policy use the class default header_factory
del default.header_factory
# Variants of the default policy for stricter parsing and for wire protocols
# that mandate CRLF line endings.
strict = default.clone(raise_on_defect=True)
SMTP = default.clone(linesep='\r\n')
HTTP = default.clone(linesep='\r\n', max_line_length=None)

+ 326 - 0
Lib/site-packages/future/backports/email/quoprimime.py

@@ -0,0 +1,326 @@
+# Copyright (C) 2001-2006 Python Software Foundation
+# Author: Ben Gertzfield
+# Contact: email-sig@python.org
+
+"""Quoted-printable content transfer encoding per RFCs 2045-2047.
+
+This module handles the content transfer encoding method defined in RFC 2045
+to encode US ASCII-like 8-bit data called `quoted-printable'.  It is used to
+safely encode text that is in a character set similar to the 7-bit US ASCII
+character set, but that includes some 8-bit characters that are normally not
+allowed in email bodies or headers.
+
+Quoted-printable is very space-inefficient for encoding binary files; use the
+email.base64mime module for that instead.
+
+This module provides an interface to encode and decode both headers and bodies
+with quoted-printable encoding.
+
+RFC 2045 defines a method for including character set information in an
+`encoded-word' in a header.  This method is commonly used for 8-bit real names
+in To:/From:/Cc: etc. fields, as well as Subject: lines.
+
+This module does not do the line wrapping or end-of-line character
+conversion necessary for proper internationalized headers; it only
+does dumb encoding and decoding.  To deal with the various line
+wrapping issues, use the email.header module.
+"""
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+from future.builtins import bytes, chr, dict, int, range, super
+
+__all__ = [
+    'body_decode',
+    'body_encode',
+    'body_length',
+    'decode',
+    'decodestring',
+    'header_decode',
+    'header_encode',
+    'header_length',
+    'quote',
+    'unquote',
+    ]
+
+import re
+import io
+
+from string import ascii_letters, digits, hexdigits
+
# Line-ending and joining constants.
CRLF = '\r\n'
NL = '\n'
EMPTYSTRING = ''

# Build a mapping of octets to the expansion of that octet.  Since we're only
# going to have 256 of these things, this isn't terribly inefficient
# space-wise.  Remember that headers and bodies have different sets of safe
# characters.  Initialize both maps with the full expansion, and then override
# the safe bytes with the more compact form.
_QUOPRI_HEADER_MAP = dict((c, '=%02X' % c) for c in range(256))
_QUOPRI_BODY_MAP = _QUOPRI_HEADER_MAP.copy()

# Safe header bytes which need no encoding.
# (Iterating future's bytes type yields ints on both Python 2 and 3.)
for c in bytes(b'-!*+/' + ascii_letters.encode('ascii') + digits.encode('ascii')):
    _QUOPRI_HEADER_MAP[c] = chr(c)
# Headers have one other special encoding; spaces become underscores.
_QUOPRI_HEADER_MAP[ord(' ')] = '_'

# Safe body bytes which need no encoding.
for c in bytes(b' !"#$%&\'()*+,-./0123456789:;<>'
               b'?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`'
               b'abcdefghijklmnopqrstuvwxyz{|}~\t'):
    _QUOPRI_BODY_MAP[c] = chr(c)
+
+
+
+# Helpers
def header_check(octet):
    """Return True if the octet should be escaped with header quopri."""
    # Safe octets map to themselves in the table; anything else needs =XX.
    return _QUOPRI_HEADER_MAP[octet] != chr(octet)
+
+
def body_check(octet):
    """Return True if the octet should be escaped with body quopri."""
    # Safe octets map to themselves in the table; anything else needs =XX.
    return _QUOPRI_BODY_MAP[octet] != chr(octet)
+
+
def header_length(bytearray):
    """Return a header quoted-printable encoding length.

    Note that this does not include any RFC 2047 chrome added by
    `header_encode()`.

    :param bytearray: An array of bytes (a.k.a. octets).
    :return: The length in bytes of the byte array when it is encoded with
        quoted-printable for headers.
    """
    total = 0
    for octet in bytearray:
        total += len(_QUOPRI_HEADER_MAP[octet])
    return total
+
+
def body_length(bytearray):
    """Return a body quoted-printable encoding length.

    :param bytearray: An array of bytes (a.k.a. octets).
    :return: The length in bytes of the byte array when it is encoded with
        quoted-printable for bodies.
    """
    total = 0
    for octet in bytearray:
        total += len(_QUOPRI_BODY_MAP[octet])
    return total
+
+
+def _max_append(L, s, maxlen, extra=''):
+    if not isinstance(s, str):
+        s = chr(s)
+    if not L:
+        L.append(s.lstrip())
+    elif len(L[-1]) + len(s) <= maxlen:
+        L[-1] += extra + s
+    else:
+        L.append(s.lstrip())
+
+
def unquote(s):
    """Turn a string in the form =AB to the ASCII character with value 0xab"""
    hexpair = s[1:3]
    return chr(int(hexpair, 16))
+
+
def quote(c):
    """Return the quoted-printable escape (=XX, uppercase hex) for *c*."""
    return '={:02X}'.format(ord(c))
+
+
+
def header_encode(header_bytes, charset='iso-8859-1'):
    """Encode a single header line with quoted-printable (like) encoding.

    Defined in RFC 2045, this `Q' encoding is similar to quoted-printable, but
    used specifically for email header fields to allow charsets with mostly 7
    bit characters (and some 8 bit) to remain more or less readable in non-RFC
    2045 aware mail clients.

    charset names the character set to use in the RFC 2046 header.  It
    defaults to iso-8859-1.
    """
    # An empty header encodes to the empty string, with no RFC 2047 chrome.
    if not header_bytes:
        return ''
    # Map every octet through the header table, then wrap the payload in
    # the =?charset?q?...?= encoded-word syntax.
    payload = ''.join(_QUOPRI_HEADER_MAP[octet] for octet in header_bytes)
    return '=?%s?q?%s?=' % (charset, payload)
+
+
class _body_accumulator(io.StringIO):
    # Accumulates quoted-printable encoded body text while tracking how
    # much room remains on the current output line (self.room).

    def __init__(self, maxlinelen, eol, *args, **kw):
        # future.builtins super allows the no-arg form on Python 2 as well.
        super().__init__(*args, **kw)
        self.eol = eol
        # room = characters still available on the current line.
        self.maxlinelen = self.room = maxlinelen

    def write_str(self, s):
        """Add string s to the accumulated body."""
        self.write(s)
        self.room -= len(s)

    def newline(self):
        """Write eol, then start new line."""
        self.write_str(self.eol)
        self.room = self.maxlinelen

    def write_soft_break(self):
        """Write a soft break, then start a new line."""
        # A soft break is '=' + eol; on decode it disappears.
        self.write_str('=')
        self.newline()

    def write_wrapped(self, s, extra_room=0):
        """Add a soft line break if needed, then write s."""
        if self.room < len(s) + extra_room:
            self.write_soft_break()
        self.write_str(s)

    def write_char(self, c, is_last_char):
        # c is either a literal character or an already-quoted '=XX' triple.
        if not is_last_char:
            # Another character follows on this line, so we must leave
            # extra room, either for it or a soft break, and whitespace
            # need not be quoted.
            self.write_wrapped(c, extra_room=1)
        elif c not in ' \t':
            # For this and remaining cases, no more characters follow,
            # so there is no need to reserve extra room (since a hard
            # break will immediately follow).
            self.write_wrapped(c)
        elif self.room >= 3:
            # It's a whitespace character at end-of-line, and we have room
            # for the three-character quoted encoding.
            self.write(quote(c))
        elif self.room == 2:
            # There's room for the whitespace character and a soft break.
            self.write(c)
            self.write_soft_break()
        else:
            # There's room only for a soft break.  The quoted whitespace
            # will be the only content on the subsequent line.
            self.write_soft_break()
            self.write(quote(c))
+
+
def body_encode(body, maxlinelen=76, eol=NL):
    """Encode with quoted-printable, wrapping at maxlinelen characters.

    Each line of encoded text will end with eol, which defaults to "\\n".  Set
    this to "\\r\\n" if you will be using the result of this function directly
    in an email.

    Each line will be wrapped at, at most, maxlinelen characters before the
    eol string (maxlinelen defaults to 76 characters, the maximum value
    permitted by RFC 2045).  Long lines will have the 'soft line break'
    quoted-printable character "=" appended to them, so the decoded text will
    be identical to the original text.

    The minimum maxlinelen is 4 to have room for a quoted character ("=XX")
    followed by a soft line break.  Smaller values will generate a
    ValueError.

    """

    if maxlinelen < 4:
        raise ValueError("maxlinelen must be at least 4")
    if not body:
        # Empty input (including '' and b'') is returned unchanged.
        return body

    # The last line may or may not end in eol, but all other lines do.
    last_has_eol = (body[-1] in '\r\n')

    # This accumulator will make it easier to build the encoded body.
    encoded_body = _body_accumulator(maxlinelen, eol)

    lines = body.splitlines()
    last_line_no = len(lines) - 1
    for line_no, line in enumerate(lines):
        last_char_index = len(line) - 1
        for i, c in enumerate(line):
            # Unsafe octets are replaced by their 3-character '=XX' form.
            if body_check(ord(c)):
                c = quote(c)
            encoded_body.write_char(c, i==last_char_index)
        # Add an eol if input line had eol.  All input lines have eol except
        # possibly the last one.
        if line_no < last_line_no or last_has_eol:
            encoded_body.newline()

    return encoded_body.getvalue()
+
+
+
# BAW: I'm not sure if the intent was for the signature of this function to be
# the same as base64MIME.decode() or not...
def decode(encoded, eol=NL):
    """Decode a quoted-printable string.

    Lines are separated with eol, which defaults to \\n.
    """
    if not encoded:
        return encoded
    # BAW: see comment in encode() above.  Again, we're building up the
    # decoded string with string concatenation, which could be done much more
    # efficiently.
    decoded = ''

    for line in encoded.splitlines():
        # Trailing whitespace was added in transit per RFC 2045; drop it.
        line = line.rstrip()
        if not line:
            # A blank line decodes to just a line break.
            decoded += eol
            continue

        i = 0
        n = len(line)
        while i < n:
            c = line[i]
            if c != '=':
                decoded += c
                i += 1
            # Otherwise, c == "=".  Are we at the end of the line?  If so, add
            # a soft line break.
            elif i+1 == n:
                # The 'continue' also skips the eol append below: a soft
                # break joins this line with the next one.
                i += 1
                continue
            # Decode if in form =AB
            elif i+2 < n and line[i+1] in hexdigits and line[i+2] in hexdigits:
                decoded += unquote(line[i:i+3])
                i += 3
            # Otherwise, not in form =AB, pass literally
            else:
                decoded += c
                i += 1

            if i == n:
                decoded += eol
    # Special case if original string did not end with eol
    # NOTE(review): only one character is stripped here; with a
    # multi-character eol such as '\r\n' a stray '\r' would remain --
    # confirm this matches the upstream intent.
    if encoded[-1] not in '\r\n' and decoded.endswith(eol):
        decoded = decoded[:-1]
    return decoded
+
+
# For convenience and backwards compatibility w/ standard base64 module
# (decoding has no separate header/body table, so both aliases map to
# the same function).
body_decode = decode
decodestring = decode
+
+
+
def _unquote_match(match):
    """Turn a match in the form =AB to the ASCII character with value 0xab"""
    return unquote(match.group(0))
+
+
# Header decoding is done a bit differently
def header_decode(s):
    """Decode a string encoded with RFC 2045 MIME header `Q' encoding.

    This function does not parse a full MIME header value encoded with
    quoted-printable (like =?iso-8895-1?q?Hello_World?=) -- please use
    the high level email.header class for that functionality.
    """
    # In the header Q encoding an underscore stands for a space.
    s = s.replace('_', ' ')
    # BUGFIX: re.ASCII was previously passed as re.sub()'s positional
    # *count* argument (re.ASCII == 256), silently limiting decoding to the
    # first 256 escapes.  It must be passed via the *flags* keyword.
    return re.sub(r'=[a-fA-F0-9]{2}', _unquote_match, s, flags=re.ASCII)

+ 400 - 0
Lib/site-packages/future/backports/email/utils.py

@@ -0,0 +1,400 @@
+# Copyright (C) 2001-2010 Python Software Foundation
+# Author: Barry Warsaw
+# Contact: email-sig@python.org
+
+"""Miscellaneous utilities."""
+
+from __future__ import unicode_literals
+from __future__ import division
+from __future__ import absolute_import
+from future import utils
+from future.builtins import bytes, int, str
+
+__all__ = [
+    'collapse_rfc2231_value',
+    'decode_params',
+    'decode_rfc2231',
+    'encode_rfc2231',
+    'formataddr',
+    'formatdate',
+    'format_datetime',
+    'getaddresses',
+    'make_msgid',
+    'mktime_tz',
+    'parseaddr',
+    'parsedate',
+    'parsedate_tz',
+    'parsedate_to_datetime',
+    'unquote',
+    ]
+
+import os
+import re
+if utils.PY2:
+    re.ASCII = 0
+import time
+import base64
+import random
+import socket
+from future.backports import datetime
+from future.backports.urllib.parse import quote as url_quote, unquote as url_unquote
+import warnings
+from io import StringIO
+
+from future.backports.email._parseaddr import quote
+from future.backports.email._parseaddr import AddressList as _AddressList
+from future.backports.email._parseaddr import mktime_tz
+
+from future.backports.email._parseaddr import parsedate, parsedate_tz, _parsedate_tz
+
+from quopri import decodestring as _qdecode
+
+# Intrapackage imports
+from future.backports.email.encoders import _bencode, _qencode
+from future.backports.email.charset import Charset
+
# Joining / quoting constants.
COMMASPACE = ', '
EMPTYSTRING = ''
UEMPTYSTRING = ''
CRLF = '\r\n'
TICK = "'"

# Characters that force an RFC 2822 display name to be quoted.
specialsre = re.compile(r'[][\\()<>@,:;".]')
# Characters that must be backslash-escaped inside a quoted string.
escapesre = re.compile(r'[\\"]')

# How to figure out if we are processing strings that come from a byte
# source with undecodable characters.
# A lone low surrogate (DC00-DFFF) signals surrogateescape-smuggled bytes.
_has_surrogates = re.compile(
    '([^\ud800-\udbff]|\A)[\udc00-\udfff]([^\udc00-\udfff]|\Z)').search
+
+# How to deal with a string containing bytes before handing it to the
+# application through the 'normal' interface.
+def _sanitize(string):
+    # Turn any escaped bytes into unicode 'unknown' char.
+    original_bytes = string.encode('ascii', 'surrogateescape')
+    return original_bytes.decode('ascii', 'replace')
+
+
+# Helpers
+
def formataddr(pair, charset='utf-8'):
    """The inverse of parseaddr(), this takes a 2-tuple of the form
    (realname, email_address) and returns the string value suitable
    for an RFC 2822 From, To or Cc header.

    If the first element of pair is false, then the second element is
    returned unmodified.

    Optional charset if given is the character set that is used to encode
    realname in case realname is not ASCII safe.  Can be an instance of str or
    a Charset-like object which has a header_encode method.  Default is
    'utf-8'.
    """
    name, address = pair
    # The address MUST (per RFC) be ascii, so raise an UnicodeError if it isn't.
    address.encode('ascii')
    if name:
        try:
            name.encode('ascii')
        except UnicodeEncodeError:
            # Non-ASCII display name: RFC 2047-encode it via the charset.
            if isinstance(charset, str):
                charset = Charset(charset)
            encoded_name = charset.header_encode(name)
            return "%s <%s>" % (encoded_name, address)
        else:
            # ASCII display name: quote it only if it contains specials,
            # and backslash-escape any embedded quotes/backslashes.
            quotes = ''
            if specialsre.search(name):
                quotes = '"'
            name = escapesre.sub(r'\\\g<0>', name)
            return '%s%s%s <%s>' % (quotes, name, quotes, address)
    return address
+
+
+
def getaddresses(fieldvalues):
    """Return a list of (REALNAME, EMAIL) for each fieldvalue."""
    # Join all header values into one string so addresses spread across
    # several headers are parsed in a single pass.
    all = COMMASPACE.join(fieldvalues)
    a = _AddressList(all)
    return a.addresslist
+
+
+
# Matches an RFC 2047 encoded-word (=?charset?q|b?payload?=) in a header.
ecre = re.compile(r'''
  =\?                   # literal =?
  (?P<charset>[^?]*?)   # non-greedy up to the next ? is the charset
  \?                    # literal ?
  (?P<encoding>[qb])    # either a "q" or a "b", case insensitive
  \?                    # literal ?
  (?P<atom>.*?)         # non-greedy up to the next ?= is the atom
  \?=                   # literal ?=
  ''', re.VERBOSE | re.IGNORECASE)
+
+
+def _format_timetuple_and_zone(timetuple, zone):
+    return '%s, %02d %s %04d %02d:%02d:%02d %s' % (
+        ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun'][timetuple[6]],
+        timetuple[2],
+        ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
+         'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec'][timetuple[1] - 1],
+        timetuple[0], timetuple[3], timetuple[4], timetuple[5],
+        zone)
+
def formatdate(timeval=None, localtime=False, usegmt=False):
    """Returns a date string as specified by RFC 2822, e.g.:

    Fri, 09 Nov 2001 01:08:47 -0000

    Optional timeval if given is a floating point time value as accepted by
    gmtime() and localtime(), otherwise the current time is used.

    Optional localtime is a flag that when True, interprets timeval, and
    returns a date relative to the local timezone instead of UTC, properly
    taking daylight savings time into account.

    Optional argument usegmt means that the timezone is written out as
    an ascii string, not numeric one (so "GMT" instead of "+0000"). This
    is needed for HTTP, and is only used when localtime==False.
    """
    # Note: we cannot use strftime() because that honors the locale and RFC
    # 2822 requires that day and month names be the English abbreviations.
    if timeval is None:
        timeval = time.time()
    if localtime:
        now = time.localtime(timeval)
        # Calculate timezone offset, based on whether the local zone has
        # daylight savings time, and whether DST is in effect.
        if time.daylight and now[-1]:
            offset = time.altzone
        else:
            offset = time.timezone
        # divmod by 3600 leaves 'minutes' holding *seconds* (0..3599);
        # it is reduced to whole minutes by the // 60 below.
        hours, minutes = divmod(abs(offset), 3600)
        # Remember offset is in seconds west of UTC, but the timezone is in
        # minutes east of UTC, so the signs differ.
        if offset > 0:
            sign = '-'
        else:
            sign = '+'
        zone = '%s%02d%02d' % (sign, hours, minutes // 60)
    else:
        now = time.gmtime(timeval)
        # Timezone offset is always -0000
        if usegmt:
            zone = 'GMT'
        else:
            zone = '-0000'
    return _format_timetuple_and_zone(now, zone)
+
def format_datetime(dt, usegmt=False):
    """Turn a datetime into a date string as specified in RFC 2822.

    If usegmt is True, dt must be an aware datetime with an offset of zero.  In
    this case 'GMT' will be rendered instead of the normal +0000 required by
    RFC2822.  This is to support HTTP headers involving date stamps.
    """
    now = dt.timetuple()
    if usegmt:
        # 'GMT' is only valid for an aware datetime that is exactly UTC.
        if dt.tzinfo is None or dt.tzinfo != datetime.timezone.utc:
            raise ValueError("usegmt option requires a UTC datetime")
        zone = 'GMT'
    elif dt.tzinfo is None:
        # Naive datetimes render as the RFC 2822 "unknown zone" -0000.
        zone = '-0000'
    else:
        zone = dt.strftime("%z")
    return _format_timetuple_and_zone(now, zone)
+
+
def make_msgid(idstring=None, domain=None):
    """Returns a string suitable for RFC 2822 compliant Message-ID, e.g:

    <20020201195627.33539.96671@nightshade.la.mastaler.com>

    Optional idstring if given is a string used to strengthen the
    uniqueness of the message id.  Optional domain if given provides the
    portion of the message id after the '@'.  It defaults to the locally
    defined hostname.
    """
    timeval = time.time()
    utcdate = time.strftime('%Y%m%d%H%M%S', time.gmtime(timeval))
    pid = os.getpid()
    # NOTE(review): time + pid + randrange(100000) gives only best-effort
    # uniqueness, not cryptographic uniqueness.
    randint = random.randrange(100000)
    if idstring is None:
        idstring = ''
    else:
        idstring = '.' + idstring
    if domain is None:
        # getfqdn() may block on DNS lookups on some systems.
        domain = socket.getfqdn()
    msgid = '<%s.%s.%s%s@%s>' % (utcdate, pid, randint, idstring, domain)
    return msgid
+
+
def parsedate_to_datetime(data):
    """Parse an RFC 2822 date string into a (possibly aware) datetime."""
    # _parsedate_tz yields the 9 timetuple fields followed by the UTC
    # offset in seconds (or None if no zone was given).
    parsed = _parsedate_tz(data)
    dtuple, tz = parsed[:-1], parsed[-1]
    if tz is None:
        return datetime.datetime(*dtuple[:6])
    return datetime.datetime(*dtuple[:6],
            tzinfo=datetime.timezone(datetime.timedelta(seconds=tz)))
+
+
def parseaddr(addr):
    """Parse addr into its (realname, email) parts; ('', '') on failure."""
    parsed = _AddressList(addr).addresslist
    if parsed:
        return parsed[0]
    return '', ''
+
+
# rfc822.unquote() doesn't properly de-backslash-ify in Python pre-2.3.
def unquote(str):
    """Strip surrounding double quotes or angle brackets from a string.

    Quoted strings also have their backslash escapes undone; anything
    else is returned unchanged.
    """
    if len(str) > 1:
        if str.startswith('"') and str.endswith('"'):
            inner = str[1:-1]
            # Undo backslash escaping of backslashes and quotes.
            return inner.replace('\\\\', '\\').replace('\\"', '"')
        if str[0] == '<' and str[-1] == '>':
            return str[1:-1]
    return str
+
+
+
+# RFC2231-related functions - parameter encoding and decoding
def decode_rfc2231(s):
    """Decode string according to RFC 2231"""
    # An extended value is charset'language'text; split on at most two ticks.
    parts = s.split("'", 2)
    if len(parts) > 2:
        return parts
    # Not an extended value: no charset or language information.
    return None, None, s
+
+
def encode_rfc2231(s, charset=None, language=None):
    """Encode string according to RFC 2231.

    If neither charset nor language is given, then s is returned as-is.  If
    charset is given but not language, the string is encoded using the empty
    string for language.
    """
    # Percent-encode everything outside the always-safe set.
    quoted = url_quote(s, safe='', encoding=charset or 'ascii')
    if charset is None and language is None:
        return quoted
    # Extended form: charset'language'percent-encoded-text.
    return "%s'%s'%s" % (charset, language or '', quoted)
+
+
# Matches RFC 2231 continuation/extended parameter names such as
# "title*0*" (encoded segment) or "title*1" (plain segment).
rfc2231_continuation = re.compile(r'^(?P<name>\w+)\*((?P<num>[0-9]+)\*?)?$',
    re.ASCII)
+
def decode_params(params):
    """Decode parameters list according to RFC 2231.

    params is a sequence of 2-tuples containing (param name, string value).
    """
    # Copy params so we don't mess with the original
    params = params[:]
    new_params = []
    # Map parameter's name to a list of continuations.  The values are a
    # 3-tuple of the continuation number, the string value, and a flag
    # specifying whether a particular segment is %-encoded.
    rfc2231_params = {}
    # The first element is the (content-type) value itself; pass it through.
    name, value = params.pop(0)
    new_params.append((name, value))
    while params:
        name, value = params.pop(0)
        # A trailing '*' on the name marks a %-encoded segment.
        if name.endswith('*'):
            encoded = True
        else:
            encoded = False
        value = unquote(value)
        mo = rfc2231_continuation.match(name)
        if mo:
            name, num = mo.group('name', 'num')
            if num is not None:
                num = int(num)
            rfc2231_params.setdefault(name, []).append((num, value, encoded))
        else:
            new_params.append((name, '"%s"' % quote(value)))
    if rfc2231_params:
        for name, continuations in rfc2231_params.items():
            value = []
            extended = False
            # Sort by number
            continuations.sort()
            # And now append all values in numerical order, converting
            # %-encodings for the encoded segments.  If any of the
            # continuation names ends in a *, then the entire string, after
            # decoding segments and concatenating, must have the charset and
            # language specifiers at the beginning of the string.
            for num, s, encoded in continuations:
                if encoded:
                    # Decode as "latin-1", so the characters in s directly
                    # represent the percent-encoded octet values.
                    # collapse_rfc2231_value treats this as an octet sequence.
                    s = url_unquote(s, encoding="latin-1")
                    extended = True
                value.append(s)
            value = quote(EMPTYSTRING.join(value))
            if extended:
                # Re-split into (charset, language, value) for the caller.
                charset, language, value = decode_rfc2231(value)
                new_params.append((name, (charset, language, '"%s"' % value)))
            else:
                new_params.append((name, '"%s"' % value))
    return new_params
+
def collapse_rfc2231_value(value, errors='replace',
                           fallback_charset='us-ascii'):
    """Collapse an RFC 2231 (charset, language, value) triple to a string.

    Non-tuple values are simply unquoted and returned.  Note that
    *fallback_charset* is accepted for API compatibility but is not
    consulted by this implementation; on a codec LookupError the text is
    returned unquoted instead.
    """
    if not isinstance(value, tuple) or len(value) != 3:
        return unquote(value)
    # While value comes to us as a unicode string, we need it to be a bytes
    # object.  We do not want bytes() normal utf-8 decoder, we want a straight
    # interpretation of the string as character bytes.
    charset, language, text = value
    rawbytes = bytes(text, 'raw-unicode-escape')
    try:
        return str(rawbytes, charset, errors)
    except LookupError:
        # charset is not a known codec.
        return unquote(text)
+
+
+#
+# datetime doesn't provide a localtime function yet, so provide one.  Code
+# adapted from the patch in issue 9527.  This may not be perfect, but it is
+# better than not having it.
+#
+
def localtime(dt=None, isdst=-1):
    """Return local time as an aware datetime object.

    If called without arguments, return current time.  Otherwise *dt*
    argument should be a datetime instance, and it is converted to the
    local time zone according to the system time zone database.  If *dt* is
    naive (that is, dt.tzinfo is None), it is assumed to be in local time.
    In this case, a positive or zero value for *isdst* causes localtime to
    presume initially that summer time (for example, Daylight Saving Time)
    is or is not (respectively) in effect for the specified time.  A
    negative value for *isdst* causes the localtime() function to attempt
    to divine whether summer time is in effect for the specified time.

    """
    if dt is None:
        # No argument: current time in the local zone.
        return datetime.datetime.now(datetime.timezone.utc).astimezone()
    if dt.tzinfo is not None:
        # Already aware: just convert to the local zone.
        return dt.astimezone()
    # We have a naive datetime.  Convert to a (localtime) timetuple and pass to
    # system mktime together with the isdst hint.  System mktime will return
    # seconds since epoch.
    tm = dt.timetuple()[:-1] + (isdst,)
    seconds = time.mktime(tm)
    localtm = time.localtime(seconds)
    try:
        # Platforms exposing tm_gmtoff/tm_zone give us the answer directly.
        delta = datetime.timedelta(seconds=localtm.tm_gmtoff)
        tz = datetime.timezone(delta, localtm.tm_zone)
    except AttributeError:
        # Compute UTC offset and compare with the value implied by tm_isdst.
        # If the values match, use the zone name implied by tm_isdst.
        delta = dt - datetime.datetime(*time.gmtime(seconds)[:6])
        dst = time.daylight and localtm.tm_isdst > 0
        gmtoff = -(time.altzone if dst else time.timezone)
        if delta == datetime.timedelta(seconds=gmtoff):
            tz = datetime.timezone(delta, time.tzname[dst])
        else:
            tz = datetime.timezone(delta)
    return dt.replace(tzinfo=tz)

+ 0 - 0
Lib/site-packages/future/backports/html/__init__.py


Някои файлове не бяха показани, защото твърде много файлове са промени