diff --git a/Presentation-6698_8055_8261.pdf b/Presentation-6698_8055_8261.pdf
new file mode 100644
index 0000000..f354dcf
Binary files /dev/null and b/Presentation-6698_8055_8261.pdf differ
diff --git a/classifier/myenv/bin/activate b/classifier/myenv/bin/activate
deleted file mode 100644
index 4b90946..0000000
--- a/classifier/myenv/bin/activate
+++ /dev/null
@@ -1,76 +0,0 @@
-# This file must be used with "source bin/activate" *from bash*
-# you cannot run it directly
-
-deactivate () {
-    # reset old environment variables
-    if [ -n "${_OLD_VIRTUAL_PATH:-}" ] ; then
-        PATH="${_OLD_VIRTUAL_PATH:-}"
-        export PATH
-        unset _OLD_VIRTUAL_PATH
-    fi
-    if [ -n "${_OLD_VIRTUAL_PYTHONHOME:-}" ] ; then
-        PYTHONHOME="${_OLD_VIRTUAL_PYTHONHOME:-}"
-        export PYTHONHOME
-        unset _OLD_VIRTUAL_PYTHONHOME
-    fi
-
-    # This should detect bash and zsh, which have a hash command that must
-    # be called to get it to forget past commands. Without forgetting
-    # past commands the $PATH changes we made may not be respected
-    if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
-        hash -r
-    fi
-
-    if [ -n "${_OLD_VIRTUAL_PS1:-}" ] ; then
-        PS1="${_OLD_VIRTUAL_PS1:-}"
-        export PS1
-        unset _OLD_VIRTUAL_PS1
-    fi
-
-    unset VIRTUAL_ENV
-    if [ ! "$1" = "nondestructive" ] ; then
-    # Self destruct!
-        unset -f deactivate
-    fi
-}
-
-# unset irrelevant variables
-deactivate nondestructive
-
-VIRTUAL_ENV="/home/chris/Storage/christina/Desktop/THMMY/AVT/THE-Assignment/classifier/myenv"
-export VIRTUAL_ENV
-
-_OLD_VIRTUAL_PATH="$PATH"
-PATH="$VIRTUAL_ENV/bin:$PATH"
-export PATH
-
-# unset PYTHONHOME if set
-# this will fail if PYTHONHOME is set to the empty string (which is bad anyway)
-# could use `if (set -u; : $PYTHONHOME) ;` in bash
-if [ -n "${PYTHONHOME:-}" ] ; then
-    _OLD_VIRTUAL_PYTHONHOME="${PYTHONHOME:-}"
-    unset PYTHONHOME
-fi
-
-if [ -z "${VIRTUAL_ENV_DISABLE_PROMPT:-}" ] ; then
-    _OLD_VIRTUAL_PS1="${PS1:-}"
-    if [ "x(myenv) " != x ] ; then
-        PS1="(myenv) ${PS1:-}"
-    else
-    if [ "`basename \"$VIRTUAL_ENV\"`" = "__" ] ; then
-        # special case for Aspen magic directories
-        # see http://www.zetadev.com/software/aspen/
-        PS1="[`basename \`dirname \"$VIRTUAL_ENV\"\``] $PS1"
-    else
-        PS1="(`basename \"$VIRTUAL_ENV\"`)$PS1"
-    fi
-    fi
-    export PS1
-fi
-
-# This should detect bash and zsh, which have a hash command that must
-# be called to get it to forget past commands. Without forgetting
-# past commands the $PATH changes we made may not be respected
-if [ -n "${BASH:-}" -o -n "${ZSH_VERSION:-}" ] ; then
-    hash -r
-fi
diff --git a/classifier/myenv/bin/activate.csh b/classifier/myenv/bin/activate.csh
deleted file mode 100644
index af4500c..0000000
--- a/classifier/myenv/bin/activate.csh
+++ /dev/null
@@ -1,37 +0,0 @@
-# This file must be used with "source bin/activate.csh" *from csh*.
-# You cannot run it directly.
-# Created by Davide Di Blasi <davidedb@gmail.com>.
-# Ported to Python 3.3 venv by Andrew Svetlov <andrew.svetlov@gmail.com>
-
-alias deactivate 'test $?_OLD_VIRTUAL_PATH != 0 && setenv PATH "$_OLD_VIRTUAL_PATH" && unset _OLD_VIRTUAL_PATH; rehash; test $?_OLD_VIRTUAL_PROMPT != 0 && set prompt="$_OLD_VIRTUAL_PROMPT" && unset _OLD_VIRTUAL_PROMPT; unsetenv VIRTUAL_ENV; test "\!:*" != "nondestructive" && unalias deactivate'
-
-# Unset irrelevant variables.
-deactivate nondestructive
-
-setenv VIRTUAL_ENV "/home/chris/Storage/christina/Desktop/THMMY/AVT/THE-Assignment/classifier/myenv"
-
-set _OLD_VIRTUAL_PATH="$PATH"
-setenv PATH "$VIRTUAL_ENV/bin:$PATH"
-
-
-set _OLD_VIRTUAL_PROMPT="$prompt"
-
-if (! "$?VIRTUAL_ENV_DISABLE_PROMPT") then
"$?VIRTUAL_ENV_DISABLE_PROMPT") then - if ("myenv" != "") then - set env_name = "myenv" - else - if (`basename "VIRTUAL_ENV"` == "__") then - # special case for Aspen magic directories - # see http://www.zetadev.com/software/aspen/ - set env_name = `basename \`dirname "$VIRTUAL_ENV"\`` - else - set env_name = `basename "$VIRTUAL_ENV"` - endif - endif - set prompt = "[$env_name] $prompt" - unset env_name -endif - -alias pydoc python -m pydoc - -rehash diff --git a/classifier/myenv/bin/activate.fish b/classifier/myenv/bin/activate.fish deleted file mode 100644 index d6b0786..0000000 --- a/classifier/myenv/bin/activate.fish +++ /dev/null @@ -1,75 +0,0 @@ -# This file must be used with ". bin/activate.fish" *from fish* (http://fishshell.org) -# you cannot run it directly - -function deactivate -d "Exit virtualenv and return to normal shell environment" - # reset old environment variables - if test -n "$_OLD_VIRTUAL_PATH" - set -gx PATH $_OLD_VIRTUAL_PATH - set -e _OLD_VIRTUAL_PATH - end - if test -n "$_OLD_VIRTUAL_PYTHONHOME" - set -gx PYTHONHOME $_OLD_VIRTUAL_PYTHONHOME - set -e _OLD_VIRTUAL_PYTHONHOME - end - - if test -n "$_OLD_FISH_PROMPT_OVERRIDE" - functions -e fish_prompt - set -e _OLD_FISH_PROMPT_OVERRIDE - functions -c _old_fish_prompt fish_prompt - functions -e _old_fish_prompt - end - - set -e VIRTUAL_ENV - if test "$argv[1]" != "nondestructive" - # Self destruct! - functions -e deactivate - end -end - -# unset irrelevant variables -deactivate nondestructive - -set -gx VIRTUAL_ENV "/home/chris/Storage/christina/Desktop/THMMY/AVT/THE-Assignment/classifier/myenv" - -set -gx _OLD_VIRTUAL_PATH $PATH -set -gx PATH "$VIRTUAL_ENV/bin" $PATH - -# unset PYTHONHOME if set -if set -q PYTHONHOME - set -gx _OLD_VIRTUAL_PYTHONHOME $PYTHONHOME - set -e PYTHONHOME -end - -if test -z "$VIRTUAL_ENV_DISABLE_PROMPT" - # fish uses a function instead of an env var to generate the prompt. - - # save the current fish_prompt function as the function _old_fish_prompt - functions -c fish_prompt _old_fish_prompt - - # with the original prompt function renamed, we can override with our own. - function fish_prompt - # Save the return status of the last command - set -l old_status $status - - # Prompt override? - if test -n "(myenv) " - printf "%s%s" "(myenv) " (set_color normal) - else - # ...Otherwise, prepend env - set -l _checkbase (basename "$VIRTUAL_ENV") - if test $_checkbase = "__" - # special case for Aspen magic directories - # see http://www.zetadev.com/software/aspen/ - printf "%s[%s]%s " (set_color -b blue white) (basename (dirname "$VIRTUAL_ENV")) (set_color normal) - else - printf "%s(%s)%s" (set_color -b blue white) (basename "$VIRTUAL_ENV") (set_color normal) - end - end - - # Restore the return status of the previous command. - echo "exit $old_status" | . 
-        _old_fish_prompt
-    end
-
-    set -gx _OLD_FISH_PROMPT_OVERRIDE "$VIRTUAL_ENV"
-end
diff --git a/classifier/myenv/bin/easy_install b/classifier/myenv/bin/easy_install
deleted file mode 100755
index cd35995..0000000
--- a/classifier/myenv/bin/easy_install
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/home/chris/Storage/christina/Desktop/THMMY/AVT/THE-Assignment/classifier/myenv/bin/python3
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from setuptools.command.easy_install import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/classifier/myenv/bin/easy_install-3.6 b/classifier/myenv/bin/easy_install-3.6
deleted file mode 100755
index cd35995..0000000
--- a/classifier/myenv/bin/easy_install-3.6
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/home/chris/Storage/christina/Desktop/THMMY/AVT/THE-Assignment/classifier/myenv/bin/python3
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from setuptools.command.easy_install import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/classifier/myenv/bin/pip b/classifier/myenv/bin/pip
deleted file mode 100755
index 4efbb96..0000000
--- a/classifier/myenv/bin/pip
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/home/chris/Storage/christina/Desktop/THMMY/AVT/THE-Assignment/classifier/myenv/bin/python3
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from pip import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/classifier/myenv/bin/pip3 b/classifier/myenv/bin/pip3
deleted file mode 100755
index 4efbb96..0000000
--- a/classifier/myenv/bin/pip3
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/home/chris/Storage/christina/Desktop/THMMY/AVT/THE-Assignment/classifier/myenv/bin/python3
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from pip import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/classifier/myenv/bin/pip3.6 b/classifier/myenv/bin/pip3.6
deleted file mode 100755
index 4efbb96..0000000
--- a/classifier/myenv/bin/pip3.6
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/home/chris/Storage/christina/Desktop/THMMY/AVT/THE-Assignment/classifier/myenv/bin/python3
-
-# -*- coding: utf-8 -*-
-import re
-import sys
-
-from pip import main
-
-if __name__ == '__main__':
-    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
-    sys.exit(main())
diff --git a/classifier/myenv/bin/python b/classifier/myenv/bin/python
deleted file mode 120000
index b8a0adb..0000000
--- a/classifier/myenv/bin/python
+++ /dev/null
@@ -1 +0,0 @@
-python3
\ No newline at end of file
diff --git a/classifier/myenv/bin/python3 b/classifier/myenv/bin/python3
deleted file mode 120000
index ae65fda..0000000
--- a/classifier/myenv/bin/python3
+++ /dev/null
@@ -1 +0,0 @@
-/usr/bin/python3
\ No newline at end of file
diff --git a/classifier/myenv/lib/python3.6/site-packages/__pycache__/easy_install.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/__pycache__/easy_install.cpython-36.pyc
deleted file mode 100644
index a008312..0000000
Binary files a/classifier/myenv/lib/python3.6/site-packages/__pycache__/easy_install.cpython-36.pyc and /dev/null differ
diff --git a/classifier/myenv/lib/python3.6/site-packages/easy_install.py b/classifier/myenv/lib/python3.6/site-packages/easy_install.py
deleted file mode 100644
index d87e984..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/easy_install.py
+++ /dev/null
@@ -1,5 +0,0 @@
-"""Run the EasyInstall command"""
-
-if __name__ == '__main__':
-    from setuptools.command.easy_install import main
-    main()
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/DESCRIPTION.rst b/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/DESCRIPTION.rst
deleted file mode 100644
index 8ef94c4..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/DESCRIPTION.rst
+++ /dev/null
@@ -1,39 +0,0 @@
-pip
-===
-
-The `PyPA recommended
-<https://packaging.python.org/en/latest/current/>`_
-tool for installing Python packages.
-
-* `Installation <https://pip.pypa.io/en/stable/installing.html>`_
-* `Documentation <https://pip.pypa.io/>`_
-* `Changelog <https://pip.pypa.io/en/stable/news.html>`_
-* `Github Page <https://github.com/pypa/pip>`_
-* `Issue Tracking <https://github.com/pypa/pip/issues>`_
-* `User mailing list <http://groups.google.com/group/python-virtualenv>`_
-* `Dev mailing list <http://groups.google.com/group/pypa-dev>`_
-* User IRC: #pypa on Freenode.
-* Dev IRC: #pypa-dev on Freenode.
-
-
-.. image:: https://img.shields.io/pypi/v/pip.svg
-   :target: https://pypi.python.org/pypi/pip
-
-.. image:: https://img.shields.io/travis/pypa/pip/master.svg
-   :target: http://travis-ci.org/pypa/pip
-
-.. image:: https://img.shields.io/appveyor/ci/pypa/pip.svg
-   :target: https://ci.appveyor.com/project/pypa/pip/history
-
-.. image:: https://readthedocs.org/projects/pip/badge/?version=stable
-   :target: https://pip.pypa.io/en/stable
-
-Code of Conduct
----------------
-
-Everyone interacting in the pip project's codebases, issue trackers, chat
-rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_.
-
-.. _PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/
-
-
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/INSTALLER b/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/INSTALLER
deleted file mode 100644
index a1b589e..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/METADATA b/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/METADATA
deleted file mode 100644
index 291a4a0..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/METADATA
+++ /dev/null
@@ -1,70 +0,0 @@
-Metadata-Version: 2.0
-Name: pip
-Version: 9.0.1
-Summary: The PyPA recommended tool for installing Python packages.
-Home-page: https://pip.pypa.io/
-Author: The pip developers
-Author-email: python-virtualenv@groups.google.com
-License: MIT
-Keywords: easy_install distutils setuptools egg virtualenv
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Topic :: Software Development :: Build Tools
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.6
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: Implementation :: PyPy
-Requires-Python: >=2.6,!=3.0.*,!=3.1.*,!=3.2.*
-Provides-Extra: testing
-Provides-Extra: testing
-Requires-Dist: mock; extra == 'testing'
-Requires-Dist: pretend; extra == 'testing'
-Requires-Dist: pytest; extra == 'testing'
-Requires-Dist: scripttest (>=1.3); extra == 'testing'
-Requires-Dist: virtualenv (>=1.10); extra == 'testing'
-
-pip
-===
-
-The `PyPA recommended
-<https://packaging.python.org/en/latest/current/>`_
-tool for installing Python packages.
-
-* `Installation <https://pip.pypa.io/en/stable/installing.html>`_
-* `Documentation <https://pip.pypa.io/>`_
-* `Changelog <https://pip.pypa.io/en/stable/news.html>`_
-* `Github Page <https://github.com/pypa/pip>`_
-* `Issue Tracking <https://github.com/pypa/pip/issues>`_
-* `User mailing list <http://groups.google.com/group/python-virtualenv>`_
-* `Dev mailing list <http://groups.google.com/group/pypa-dev>`_
-* User IRC: #pypa on Freenode.
-* Dev IRC: #pypa-dev on Freenode.
-
-
-.. image:: https://img.shields.io/pypi/v/pip.svg
-   :target: https://pypi.python.org/pypi/pip
-
-.. image:: https://img.shields.io/travis/pypa/pip/master.svg
-   :target: http://travis-ci.org/pypa/pip
-
-.. image:: https://img.shields.io/appveyor/ci/pypa/pip.svg
-   :target: https://ci.appveyor.com/project/pypa/pip/history
-
-.. image:: https://readthedocs.org/projects/pip/badge/?version=stable
-   :target: https://pip.pypa.io/en/stable
-
-Code of Conduct
----------------
-
-Everyone interacting in the pip project's codebases, issue trackers, chat
-rooms, and mailing lists is expected to follow the `PyPA Code of Conduct`_.
-
-.. _PyPA Code of Conduct: https://www.pypa.io/en/latest/code-of-conduct/
-
-
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/RECORD b/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/RECORD
deleted file mode 100644
index ce64b77..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/RECORD
+++ /dev/null
@@ -1,123 +0,0 @@
-pip/__init__.py,sha256=ds3YAAeZwhX6e8yfxjLCxPlOb37Bh42ew09XEPJjQGE,11537
-pip/__main__.py,sha256=V6Kh-IEDEFpt1cahRE6MajUF_14qJR_Qsvn4MjWZXzE,584
-pip/basecommand.py,sha256=TTlmZesQ4Vuxcto2KqwZGmgmN5ioHEl_DeFev9ie_SA,11910
-pip/baseparser.py,sha256=AKMOeF3fTrRroiv0DmTQbdiLW0DQux2KqGC_dJJB9d0,10465
-pip/cmdoptions.py,sha256=pRptFz05iFEfSW4Flg3x1_P92sYlFvq7elhnwujikNY,16473
-pip/download.py,sha256=rA0wbmqC2n9ejX481YJSidmKgQqQDjdaxkHkHlAN68k,32171
-pip/exceptions.py,sha256=BvqH-Jw3tP2b-2IJ2kjrQemOAPMqKrQMLRIZHZQpJXk,8121
-pip/index.py,sha256=L6UhtAEZc2qw7BqfQrkPQcw2gCgEw3GukLRSA95BNyI,39950
-pip/locations.py,sha256=9rJRlgonC6QC2zGDIn_7mXaoZ9_tF_IHM2BQhWVRgbo,5626
-pip/pep425tags.py,sha256=q3kec4f6NHszuGYIhGIbVvs896D06uJAnKFgJ_wce44,10980
-pip/status_codes.py,sha256=F6uDG6Gj7RNKQJUDnd87QKqI16Us-t-B0wPF_4QMpWc,156
-pip/wheel.py,sha256=QSWmGs2ui-n4UMWm0JUY6aMCcwNKungVzbWsxI9KlJQ,32010
-pip/_vendor/__init__.py,sha256=L-0x9jj0HSZen1Fm2U0GUbxfjfwQPIXc4XJ4IAxy8D8,4804
-pip/commands/__init__.py,sha256=2Uq3HCdjchJD9FL1LB7rd5v6UySVAVizX0W3EX3hIoE,2244
-pip/commands/check.py,sha256=-A7GI1-WZBh9a4P6UoH_aR-J7I8Lz8ly7m3wnCjmevs,1382
-pip/commands/completion.py,sha256=kkPgVX7SUcJ_8Juw5GkgWaxHN9_45wmAr9mGs1zXEEs,2453
-pip/commands/download.py,sha256=8RuuPmSYgAq3iEDTqZY_1PDXRqREdUULHNjWJeAv7Mo,7810
-pip/commands/freeze.py,sha256=h6-yFMpjCjbNj8-gOm5UuoF6cg14N5rPV4TCi3_CeuI,2835
-pip/commands/hash.py,sha256=MCt4jEFyfoce0lVeNEz1x49uaTY-VDkKiBvvxrVcHkw,1597
-pip/commands/help.py,sha256=84HWkEdnGP_AEBHnn8gJP2Te0XTXRKFoXqXopbOZTNo,982
-pip/commands/install.py,sha256=o-CR1TKf-b1qaFv47nNlawqsIfDjXyIzv_iJUw1Trag,18069
-pip/commands/list.py,sha256=93bCiFyt2Qut_YHkYHJMZHpXladmxsjS-yOtZeb3uqI,11369
-pip/commands/search.py,sha256=oTs9QNdefnrmCV_JeftG0PGiMuYVmiEDF1OUaYsmDao,4502
-pip/commands/show.py,sha256=ZYM57_7U8KP9MQIIyHKQdZxmiEZByy-DRzB697VFoTY,5891
-pip/commands/uninstall.py,sha256=tz8cXz4WdpUdnt3RvpdQwH6_SNMB50egBIZWa1dwfcc,2884
-pip/commands/wheel.py,sha256=z5SEhws2YRMb0Ml1IEkg6jFZMLRpLl86bHCrQbYt5zo,7729
-pip/compat/__init__.py,sha256=2Xs_IpsmdRgHbQgQO0c8_lPvHJnQXHyGWxPbLbYJL4c,4672
-pip/compat/dictconfig.py,sha256=dRrelPDWrceDSzFT51RTEVY2GuM7UDyc5Igh_tn4Fvk,23096
-pip/models/__init__.py,sha256=0Rs7_RA4DxeOkWT5Cq4CQzDrSEhvYcN3TH2cazr72PE,71
-pip/models/index.py,sha256=pUfbO__v3mD9j-2n_ClwPS8pVyx4l2wIwyvWt8GMCRA,487
-pip/operations/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pip/operations/check.py,sha256=uwUN9cs1sPo7c0Sj6pRrSv7b22Pk29SXUImTelVchMQ,1590
-pip/operations/freeze.py,sha256=k-7w7LsM-RpPv7ERBzHiPpYkH-GuYfHLyR-Cp_1VPL0,5194
-pip/req/__init__.py,sha256=vFwZY8_Vc1WU1zFAespg1My_r_AT3n7cN0W9eX0EFqk,276
-pip/req/req_file.py,sha256=fG9MDsXUNPhmGwxUiwrIXEynyD8Q7s3L47-hLZPDXq0,11926
-pip/req/req_install.py,sha256=gYrH-lwQMmt55VVbav_EtRIPu94cQbHFHm_Kq6AeHbg,46487
-pip/req/req_set.py,sha256=jHspXqcA2FxcF05dgUIAZ5huYPv6bn0wRUX0Z7PKmaA,34462
-pip/req/req_uninstall.py,sha256=fdH2VgCjEC8NRYDS7fRu3ZJaBBUEy-N5muwxDX5MBNM,6897
-pip/utils/__init__.py,sha256=zk1vF2EzHZX1ZKPwgeC9I6yKvs8IJ6NZEfXgp2IP8hI,27912
-pip/utils/appdirs.py,sha256=kj2LK-I2fC5QnEh_A_v-ev_IQMcXaWWF5DE39sNvCLQ,8811
-pip/utils/build.py,sha256=4smLRrfSCmXmjEnVnMFh2tBEpNcSLRe6J0ejZJ-wWJE,1312
-pip/utils/deprecation.py,sha256=X_FMjtDbMJqfqEkdRrki-mYyIdPB6I6DHUTCA_ChY6M,2232
-pip/utils/encoding.py,sha256=NQxGiFS5GbeAveLZTnx92t5r0PYqvt0iRnP2u9SGG1w,971
-pip/utils/filesystem.py,sha256=ZEVBuYM3fqr2_lgOESh4Y7fPFszGD474zVm_M3Mb5Tk,899
-pip/utils/glibc.py,sha256=jcQYjt_oJLPKVZB28Kauy4Sw70zS-wawxoU1HHX36_0,2939
-pip/utils/hashes.py,sha256=oMk7cd3PbJgzpSQyXq1MytMud5f6H5Oa2YY5hYuCq6I,2866
-pip/utils/logging.py,sha256=7yWu4gZw-Qclj7X80QVdpGWkdTWGKT4LiUVKcE04pro,3327
-pip/utils/outdated.py,sha256=fNwOCL5r2EftPGhgCYGMKu032HC8cV-JAr9lp0HmToM,5455
-pip/utils/packaging.py,sha256=qhmli14odw6DIhWJgQYS2Q0RrSbr8nXNcG48f5yTRms,2080
-pip/utils/setuptools_build.py,sha256=0blfscmNJW_iZ5DcswJeDB_PbtTEjfK9RL1R1WEDW2E,278
-pip/utils/ui.py,sha256=pbDkSAeumZ6jdZcOJ2yAbx8iBgeP2zfpqNnLJK1gskQ,11597
-pip/vcs/__init__.py,sha256=WafFliUTHMmsSISV8PHp1M5EXDNSWyJr78zKaQmPLdY,12374
-pip/vcs/bazaar.py,sha256=tYTwc4b4off8mr0O2o8SiGejqBDJxcbDBMSMd9-ISYc,3803
-pip/vcs/git.py,sha256=5LfWryi78A-2ULjEZJvCTarJ_3l8venwXASlwm8hiug,11197
-pip/vcs/mercurial.py,sha256=xG6rDiwHCRytJEs23SIHBXl_SwQo2jkkdD_6rVVP5h4,3472
-pip/vcs/subversion.py,sha256=GAuX2Sk7IZvJyEzENKcVld_wGBrQ3fpXDlXjapZEYdI,9350
-pip-9.0.1.dist-info/DESCRIPTION.rst,sha256=Va8Wj1XBpTbVQ2Z41mZRJdALEeziiS_ZewWn1H2ecY4,1287
-pip-9.0.1.dist-info/METADATA,sha256=LZLdUBpPmFB4Of_9wIHegCXhbmiByzOv6WCGs3rixt0,2553
-pip-9.0.1.dist-info/RECORD,,
-pip-9.0.1.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110
-pip-9.0.1.dist-info/entry_points.txt,sha256=Q-fR2tcp9DRdeXoGn1wR67Xecy32o5EyQEnzDghwqqk,68
-pip-9.0.1.dist-info/metadata.json,sha256=eAfMY0s5HjwtLLjIZ9LYDxWocl2her-knzH7qTJ38CU,1565
-pip-9.0.1.dist-info/top_level.txt,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-../../../bin/pip,sha256=BZ5XUh9I1BRLnSOOS1QrrMyNrU3dEhtqh0JBYY6LCxs,281
-../../../bin/pip3,sha256=BZ5XUh9I1BRLnSOOS1QrrMyNrU3dEhtqh0JBYY6LCxs,281
-../../../bin/pip3.6,sha256=BZ5XUh9I1BRLnSOOS1QrrMyNrU3dEhtqh0JBYY6LCxs,281
-pip-9.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
-pip/vcs/__pycache__/git.cpython-36.pyc,,
-pip/vcs/__pycache__/__init__.cpython-36.pyc,,
-pip/vcs/__pycache__/mercurial.cpython-36.pyc,,
-pip/vcs/__pycache__/subversion.cpython-36.pyc,,
-pip/vcs/__pycache__/bazaar.cpython-36.pyc,,
-pip/compat/__pycache__/__init__.cpython-36.pyc,,
-pip/compat/__pycache__/dictconfig.cpython-36.pyc,,
-pip/commands/__pycache__/wheel.cpython-36.pyc,,
-pip/commands/__pycache__/freeze.cpython-36.pyc,,
-pip/commands/__pycache__/__init__.cpython-36.pyc,,
-pip/commands/__pycache__/search.cpython-36.pyc,,
-pip/commands/__pycache__/completion.cpython-36.pyc,,
-pip/commands/__pycache__/hash.cpython-36.pyc,,
-pip/commands/__pycache__/download.cpython-36.pyc,,
-pip/commands/__pycache__/help.cpython-36.pyc,,
-pip/commands/__pycache__/uninstall.cpython-36.pyc,,
-pip/commands/__pycache__/show.cpython-36.pyc,,
-pip/commands/__pycache__/install.cpython-36.pyc,,
-pip/commands/__pycache__/list.cpython-36.pyc,,
-pip/commands/__pycache__/check.cpython-36.pyc,,
-pip/_vendor/__pycache__/__init__.cpython-36.pyc,,
-pip/utils/__pycache__/encoding.cpython-36.pyc,,
-pip/utils/__pycache__/appdirs.cpython-36.pyc,,
-pip/utils/__pycache__/__init__.cpython-36.pyc,,
-pip/utils/__pycache__/hashes.cpython-36.pyc,,
-pip/utils/__pycache__/filesystem.cpython-36.pyc,,
-pip/utils/__pycache__/ui.cpython-36.pyc,,
-pip/utils/__pycache__/setuptools_build.cpython-36.pyc,,
-pip/utils/__pycache__/packaging.cpython-36.pyc,,
-pip/utils/__pycache__/build.cpython-36.pyc,,
-pip/utils/__pycache__/outdated.cpython-36.pyc,,
-pip/utils/__pycache__/glibc.cpython-36.pyc,,
-pip/utils/__pycache__/deprecation.cpython-36.pyc,,
-pip/utils/__pycache__/logging.cpython-36.pyc,,
-pip/models/__pycache__/__init__.cpython-36.pyc,,
-pip/models/__pycache__/index.cpython-36.pyc,,
-pip/operations/__pycache__/freeze.cpython-36.pyc,,
-pip/operations/__pycache__/__init__.cpython-36.pyc,,
-pip/operations/__pycache__/check.cpython-36.pyc,,
-pip/__pycache__/wheel.cpython-36.pyc,,
-pip/__pycache__/__init__.cpython-36.pyc,,
-pip/__pycache__/index.cpython-36.pyc,,
-pip/__pycache__/download.cpython-36.pyc,,
-pip/__pycache__/status_codes.cpython-36.pyc,,
-pip/__pycache__/basecommand.cpython-36.pyc,,
-pip/__pycache__/cmdoptions.cpython-36.pyc,,
-pip/__pycache__/locations.cpython-36.pyc,,
-pip/__pycache__/baseparser.cpython-36.pyc,,
-pip/__pycache__/exceptions.cpython-36.pyc,,
-pip/__pycache__/pep425tags.cpython-36.pyc,,
-pip/__pycache__/__main__.cpython-36.pyc,,
-pip/req/__pycache__/__init__.cpython-36.pyc,,
-pip/req/__pycache__/req_install.cpython-36.pyc,,
-pip/req/__pycache__/req_uninstall.cpython-36.pyc,,
-pip/req/__pycache__/req_set.cpython-36.pyc,,
-pip/req/__pycache__/req_file.cpython-36.pyc,,
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/WHEEL b/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/WHEEL
deleted file mode 100644
index 7332a41..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/WHEEL
+++ /dev/null
@@ -1,6 +0,0 @@
-Wheel-Version: 1.0
-Generator: bdist_wheel (0.30.0)
-Root-Is-Purelib: true
-Tag: py2-none-any
-Tag: py3-none-any
-
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/entry_points.txt b/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/entry_points.txt
deleted file mode 100644
index 879fd89..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/entry_points.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-[console_scripts]
-pip = pip:main
-pip3 = pip:main
-pip3.6 = pip:main
-
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/metadata.json b/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/metadata.json
deleted file mode 100644
index 15c01e9..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/metadata.json
+++ /dev/null
@@ -1 +0,0 @@
-{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Topic :: Software Development :: Build Tools", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: Implementation :: PyPy"], "extensions": {"python.commands": {"wrap_console": {"pip": "pip:main", "pip3": "pip:main", "pip3.6": "pip:main"}}, "python.details": {"contacts": [{"email": "python-virtualenv@groups.google.com", "name": "The pip developers", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://pip.pypa.io/"}}, "python.exports": {"console_scripts": {"pip": "pip:main", "pip3": "pip:main", "pip3.6": "pip:main"}}}, "extras": ["testing"], "generator": "bdist_wheel (0.30.0)", "keywords": ["easy_install", "distutils", "setuptools", "egg", "virtualenv"], "license": "MIT", "metadata_version": "2.0", "name": "pip", "requires_python": ">=2.6,!=3.0.*,!=3.1.*,!=3.2.*", "run_requires": [{"extra": "testing", "requires": ["mock", "pretend", "pytest", "scripttest (>=1.3)", "virtualenv (>=1.10)"]}], "summary": "The PyPA recommended tool for installing Python packages.", "test_requires": [{"requires": ["mock", "pretend", "pytest", "scripttest (>=1.3)", "virtualenv (>=1.10)"]}], "version": "9.0.1"}
\ No newline at end of file
"virtualenv"], "license": "MIT", "metadata_version": "2.0", "name": "pip", "requires_python": ">=2.6,!=3.0.*,!=3.1.*,!=3.2.*", "run_requires": [{"extra": "testing", "requires": ["mock", "pretend", "pytest", "scripttest (>=1.3)", "virtualenv (>=1.10)"]}], "summary": "The PyPA recommended tool for installing Python packages.", "test_requires": [{"requires": ["mock", "pretend", "pytest", "scripttest (>=1.3)", "virtualenv (>=1.10)"]}], "version": "9.0.1"} \ No newline at end of file diff --git a/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/top_level.txt b/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/top_level.txt deleted file mode 100644 index a1b589e..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip-9.0.1.dist-info/top_level.txt +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/__init__.py b/classifier/myenv/lib/python3.6/site-packages/pip/__init__.py deleted file mode 100644 index 3b197d6..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/__init__.py +++ /dev/null @@ -1,338 +0,0 @@ -#!/usr/bin/env python -from __future__ import absolute_import - -import locale -import logging -import os -import optparse -import warnings - -import sys -import re - -# 2016-06-17 barry@debian.org: urllib3 1.14 added optional support for socks, -# but if invoked (i.e. imported), it will issue a warning to stderr if socks -# isn't available. requests unconditionally imports urllib3's socks contrib -# module, triggering this warning. The warning breaks DEP-8 tests (because of -# the stderr output) and is just plain annoying in normal usage. I don't want -# to add socks as yet another dependency for pip, nor do I want to allow-stder -# in the DEP-8 tests, so just suppress the warning. pdb tells me this has to -# be done before the import of pip.vcs. -try: - from pip._vendor.requests.packages.urllib3.exceptions import DependencyWarning -except ImportError: - from urllib3.exceptions import DependencyWarning -warnings.filterwarnings("ignore", category=DependencyWarning) # noqa - - -from pip.exceptions import InstallationError, CommandError, PipError -from pip.utils import get_installed_distributions, get_prog -from pip.utils import deprecation, dist_is_editable -from pip.vcs import git, mercurial, subversion, bazaar # noqa -from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter -from pip.commands import get_summaries, get_similar_commands -from pip.commands import commands_dict -try: - from pip._vendor.requests.packages.urllib3.exceptions import ( - InsecureRequestWarning, - ) -except ImportError: - from urllib3.exceptions import ( - InsecureRequestWarning, - ) - -# assignment for flake8 to be happy - -# This fixes a peculiarity when importing via __import__ - as we are -# initialising the pip module, "from pip import cmdoptions" is recursive -# and appears not to work properly in that situation. -import pip.cmdoptions -cmdoptions = pip.cmdoptions - -# The version as used in the setup.py and the docs conf.py -__version__ = "9.0.1" - - -logger = logging.getLogger(__name__) - -# Hide the InsecureRequestWarning from urllib3 -warnings.filterwarnings("ignore", category=InsecureRequestWarning) - - -def autocomplete(): - """Command and option completion for the main option parser (and options) - and its subcommands (and options). - - Enable by sourcing one of the completion shell scripts (bash, zsh or fish). - """ - # Don't complete if user hasn't sourced bash_completion file. 
-    if 'PIP_AUTO_COMPLETE' not in os.environ:
-        return
-    cwords = os.environ['COMP_WORDS'].split()[1:]
-    cword = int(os.environ['COMP_CWORD'])
-    try:
-        current = cwords[cword - 1]
-    except IndexError:
-        current = ''
-
-    subcommands = [cmd for cmd, summary in get_summaries()]
-    options = []
-    # subcommand
-    try:
-        subcommand_name = [w for w in cwords if w in subcommands][0]
-    except IndexError:
-        subcommand_name = None
-
-    parser = create_main_parser()
-    # subcommand options
-    if subcommand_name:
-        # special case: 'help' subcommand has no options
-        if subcommand_name == 'help':
-            sys.exit(1)
-        # special case: list locally installed dists for uninstall command
-        if subcommand_name == 'uninstall' and not current.startswith('-'):
-            installed = []
-            lc = current.lower()
-            for dist in get_installed_distributions(local_only=True):
-                if dist.key.startswith(lc) and dist.key not in cwords[1:]:
-                    installed.append(dist.key)
-            # if there are no dists installed, fall back to option completion
-            if installed:
-                for dist in installed:
-                    print(dist)
-                sys.exit(1)
-
-        subcommand = commands_dict[subcommand_name]()
-        options += [(opt.get_opt_string(), opt.nargs)
-                    for opt in subcommand.parser.option_list_all
-                    if opt.help != optparse.SUPPRESS_HELP]
-
-        # filter out previously specified options from available options
-        prev_opts = [x.split('=')[0] for x in cwords[1:cword - 1]]
-        options = [(x, v) for (x, v) in options if x not in prev_opts]
-        # filter options by current input
-        options = [(k, v) for k, v in options if k.startswith(current)]
-        for option in options:
-            opt_label = option[0]
-            # append '=' to options which require args
-            if option[1]:
-                opt_label += '='
-            print(opt_label)
-    else:
-        # show main parser options only when necessary
-        if current.startswith('-') or current.startswith('--'):
-            opts = [i.option_list for i in parser.option_groups]
-            opts.append(parser.option_list)
-            opts = (o for it in opts for o in it)
-
-            subcommands += [i.get_opt_string() for i in opts
-                            if i.help != optparse.SUPPRESS_HELP]
-
-        print(' '.join([x for x in subcommands if x.startswith(current)]))
-    sys.exit(1)
-
-
-def create_main_parser():
-    parser_kw = {
-        'usage': '\n%prog <command> [options]',
-        'add_help_option': False,
-        'formatter': UpdatingDefaultsHelpFormatter(),
-        'name': 'global',
-        'prog': get_prog(),
-    }
-
-    parser = ConfigOptionParser(**parser_kw)
-    parser.disable_interspersed_args()
-
-    pip_pkg_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-    parser.version = 'pip %s from %s (python %s)' % (
-        __version__, pip_pkg_dir, sys.version[:3])
-
-    # add the general options
-    gen_opts = cmdoptions.make_option_group(cmdoptions.general_group, parser)
-    parser.add_option_group(gen_opts)
-
-    parser.main = True  # so the help formatter knows
-
-    # create command listing for description
-    command_summaries = get_summaries()
-    description = [''] + ['%-27s %s' % (i, j) for i, j in command_summaries]
-    parser.description = '\n'.join(description)
-
-    return parser
-
-
-def parseopts(args):
-    parser = create_main_parser()
-
-    # Note: parser calls disable_interspersed_args(), so the result of this
-    # call is to split the initial args into the general options before the
-    # subcommand and everything else.
-    # For example:
-    #  args: ['--timeout=5', 'install', '--user', 'INITools']
-    #  general_options: ['--timeout==5']
-    #  args_else: ['install', '--user', 'INITools']
-    general_options, args_else = parser.parse_args(args)
-
-    # --version
-    if general_options.version:
-        sys.stdout.write(parser.version)
-        sys.stdout.write(os.linesep)
-        sys.exit()
-
-    # pip || pip help -> print_help()
-    if not args_else or (args_else[0] == 'help' and len(args_else) == 1):
-        parser.print_help()
-        sys.exit()
-
-    # the subcommand name
-    cmd_name = args_else[0]
-
-    if cmd_name not in commands_dict:
-        guess = get_similar_commands(cmd_name)
-
-        msg = ['unknown command "%s"' % cmd_name]
-        if guess:
-            msg.append('maybe you meant "%s"' % guess)
-
-        raise CommandError(' - '.join(msg))
-
-    # all the args without the subcommand
-    cmd_args = args[:]
-    cmd_args.remove(cmd_name)
-
-    return cmd_name, cmd_args
-
-
-def check_isolated(args):
-    isolated = False
-
-    if "--isolated" in args:
-        isolated = True
-
-    return isolated
-
-
-def main(args=None):
-    if args is None:
-        args = sys.argv[1:]
-
-    # Configure our deprecation warnings to be sent through loggers
-    deprecation.install_warning_logger()
-
-    autocomplete()
-
-    try:
-        cmd_name, cmd_args = parseopts(args)
-    except PipError as exc:
-        sys.stderr.write("ERROR: %s" % exc)
-        sys.stderr.write(os.linesep)
-        sys.exit(1)
-
-    # Needed for locale.getpreferredencoding(False) to work
-    # in pip.utils.encoding.auto_decode
-    try:
-        locale.setlocale(locale.LC_ALL, '')
-    except locale.Error as e:
-        # setlocale can apparently crash if locale are uninitialized
-        logger.debug("Ignoring error %s when setting locale", e)
-    command = commands_dict[cmd_name](isolated=check_isolated(cmd_args))
-    return command.main(cmd_args)
-
-
-# ###########################################################
-# # Writing freeze files
-
-class FrozenRequirement(object):
-
-    def __init__(self, name, req, editable, comments=()):
-        self.name = name
-        self.req = req
-        self.editable = editable
-        self.comments = comments
-
-    _rev_re = re.compile(r'-r(\d+)$')
-    _date_re = re.compile(r'-(20\d\d\d\d\d\d)$')
-
-    @classmethod
-    def from_dist(cls, dist, dependency_links):
-        location = os.path.normcase(os.path.abspath(dist.location))
-        comments = []
-        from pip.vcs import vcs, get_src_requirement
-        if dist_is_editable(dist) and vcs.get_backend_name(location):
-            editable = True
-            try:
-                req = get_src_requirement(dist, location)
-            except InstallationError as exc:
-                logger.warning(
-                    "Error when trying to get requirement for VCS system %s, "
-                    "falling back to uneditable format", exc
-                )
-                req = None
-            if req is None:
-                logger.warning(
-                    'Could not determine repository location of %s', location
-                )
-                comments.append(
-                    '## !! Could not determine repository location'
-                )
-                req = dist.as_requirement()
-                editable = False
-        else:
-            editable = False
-            req = dist.as_requirement()
-            specs = req.specs
-            assert len(specs) == 1 and specs[0][0] in ["==", "==="], \
-                'Expected 1 spec with == or ===; specs = %r; dist = %r' % \
-                (specs, dist)
-            version = specs[0][1]
-            ver_match = cls._rev_re.search(version)
-            date_match = cls._date_re.search(version)
-            if ver_match or date_match:
-                svn_backend = vcs.get_backend('svn')
-                if svn_backend:
-                    svn_location = svn_backend().get_location(
-                        dist,
-                        dependency_links,
-                    )
-                if not svn_location:
-                    logger.warning(
-                        'Warning: cannot find svn location for %s', req)
-                    comments.append(
-                        '## FIXME: could not find svn URL in dependency_links '
-                        'for this package:'
-                    )
-                else:
-                    comments.append(
-                        '# Installing as editable to satisfy requirement %s:' %
-                        req
-                    )
-                    if ver_match:
-                        rev = ver_match.group(1)
-                    else:
-                        rev = '{%s}' % date_match.group(1)
-                    editable = True
-                    req = '%s@%s#egg=%s' % (
-                        svn_location,
-                        rev,
-                        cls.egg_name(dist)
-                    )
-        return cls(dist.project_name, req, editable, comments)
-
-    @staticmethod
-    def egg_name(dist):
-        name = dist.egg_name()
-        match = re.search(r'-py\d\.\d$', name)
-        if match:
-            name = name[:match.start()]
-        return name
-
-    def __str__(self):
-        req = self.req
-        if self.editable:
-            req = '-e %s' % req
-        return '\n'.join(list(self.comments) + [str(req)]) + '\n'
-
-
-if __name__ == '__main__':
-    sys.exit(main())
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/__main__.py b/classifier/myenv/lib/python3.6/site-packages/pip/__main__.py
deleted file mode 100644
index 5556539..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pip/__main__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-from __future__ import absolute_import
-
-import os
-import sys
-
-# If we are running from a wheel, add the wheel to sys.path
-# This allows the usage python pip-*.whl/pip install pip-*.whl
-if __package__ == '':
-    # __file__ is pip-*.whl/pip/__main__.py
-    # first dirname call strips of '/__main__.py', second strips off '/pip'
-    # Resulting path is the name of the wheel itself
-    # Add that to sys.path so we can import pip
-    path = os.path.dirname(os.path.dirname(__file__))
-    sys.path.insert(0, path)
-
-import pip  # noqa
-
-if __name__ == '__main__':
-    sys.exit(pip.main())
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/__init__.cpython-36.pyc
deleted file mode 100644
index e124015..0000000
Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/__init__.cpython-36.pyc and /dev/null differ
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/__main__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/__main__.cpython-36.pyc
deleted file mode 100644
index 2d92682..0000000
Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/__main__.cpython-36.pyc and /dev/null differ
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/basecommand.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/basecommand.cpython-36.pyc
deleted file mode 100644
index 6d7f979..0000000
Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/basecommand.cpython-36.pyc and /dev/null differ
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/baseparser.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/baseparser.cpython-36.pyc
deleted file mode 100644
index 15cfb16..0000000
Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/baseparser.cpython-36.pyc and /dev/null differ
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/cmdoptions.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/cmdoptions.cpython-36.pyc
deleted file mode 100644
index e2ca76f..0000000
Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/cmdoptions.cpython-36.pyc and /dev/null differ
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/download.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/download.cpython-36.pyc
deleted file mode 100644
index 08e8fa1..0000000
Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/download.cpython-36.pyc and /dev/null differ
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/exceptions.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/exceptions.cpython-36.pyc
deleted file mode 100644
index a0e3d7a..0000000
Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/exceptions.cpython-36.pyc and /dev/null differ
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/index.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/index.cpython-36.pyc
deleted file mode 100644
index 8db3de7..0000000
Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/index.cpython-36.pyc and /dev/null differ
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/locations.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/locations.cpython-36.pyc
deleted file mode 100644
index b184862..0000000
Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/locations.cpython-36.pyc and /dev/null differ
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/pep425tags.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/pep425tags.cpython-36.pyc
deleted file mode 100644
index 1f6aa5c..0000000
Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/pep425tags.cpython-36.pyc and /dev/null differ
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/status_codes.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/status_codes.cpython-36.pyc
deleted file mode 100644
index 3378183..0000000
Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/status_codes.cpython-36.pyc and /dev/null differ
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/wheel.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/wheel.cpython-36.pyc
deleted file mode 100644
index 5ed9709..0000000
Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/__pycache__/wheel.cpython-36.pyc and /dev/null differ
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/_vendor/__init__.py b/classifier/myenv/lib/python3.6/site-packages/pip/_vendor/__init__.py
deleted file mode 100644
index 8e76ab8..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pip/_vendor/__init__.py
+++ /dev/null
@@ -1,111 +0,0 @@
-"""
-pip._vendor is for vendoring dependencies of pip to prevent needing pip to
-depend on something external.
-
-Files inside of pip._vendor should be considered immutable and should only be
-updated to versions from upstream.
-"""
-from __future__ import absolute_import
-
-import glob
-import os.path
-import sys
-
-# Downstream redistributors which have debundled our dependencies should also
-# patch this value to be true. This will trigger the additional patching
-# to cause things like "six" to be available as pip.
-DEBUNDLED = True
-
-# By default, look in this directory for a bunch of .whl files which we will
-# add to the beginning of sys.path before attempting to import anything. This
-# is done to support downstream re-distributors like Debian and Fedora who
-# wish to create their own Wheels for our dependencies to aid in debundling.
-WHEEL_DIR = os.path.abspath(os.path.join(sys.prefix, 'share', 'python-wheels'))
-
-
-# Define a small helper function to alias our vendored modules to the real ones
-# if the vendored ones do not exist. This idea of this was taken from
-# https://github.com/kennethreitz/requests/pull/2567.
-def vendored(modulename):
-    vendored_name = "{0}.{1}".format(__name__, modulename)
-
-    try:
-        __import__(vendored_name, globals(), locals(), level=0)
-    except ImportError:
-        try:
-            __import__(modulename, globals(), locals(), level=0)
-        except ImportError:
-            # We can just silently allow import failures to pass here. If we
-            # got to this point it means that ``import pip._vendor.whatever``
-            # failed and so did ``import whatever``. Since we're importing this
-            # upfront in an attempt to alias imports, not erroring here will
-            # just mean we get a regular import error whenever pip *actually*
-            # tries to import one of these modules to use it, which actually
-            # gives us a better error message than we would have otherwise
-            # gotten.
-            pass
-        else:
-            sys.modules[vendored_name] = sys.modules[modulename]
-            base, head = vendored_name.rsplit(".", 1)
-            setattr(sys.modules[base], head, sys.modules[modulename])
-
-
-# If we're operating in a debundled setup, then we want to go ahead and trigger
-# the aliasing of our vendored libraries as well as looking for wheels to add
-# to our sys.path. This will cause all of this code to be a no-op typically
-# however downstream redistributors can enable it in a consistent way across
-# all platforms.
-if DEBUNDLED:
-    # Actually look inside of WHEEL_DIR to find .whl files and add them to the
-    # front of our sys.path.
-    sys.path[:] = glob.glob(os.path.join(WHEEL_DIR, "*.whl")) + sys.path
-
-    # Actually alias all of our vendored dependencies.
- vendored("cachecontrol") - vendored("colorama") - vendored("distlib") - vendored("distro") - vendored("html5lib") - vendored("lockfile") - vendored("six") - vendored("six.moves") - vendored("six.moves.urllib") - vendored("packaging") - vendored("packaging.version") - vendored("packaging.specifiers") - vendored("pkg_resources") - vendored("progress") - vendored("retrying") - vendored("requests") - vendored("requests.packages") - vendored("requests.packages.urllib3") - vendored("requests.packages.urllib3._collections") - vendored("requests.packages.urllib3.connection") - vendored("requests.packages.urllib3.connectionpool") - vendored("requests.packages.urllib3.contrib") - vendored("requests.packages.urllib3.contrib.ntlmpool") - vendored("requests.packages.urllib3.contrib.pyopenssl") - vendored("requests.packages.urllib3.exceptions") - vendored("requests.packages.urllib3.fields") - vendored("requests.packages.urllib3.filepost") - vendored("requests.packages.urllib3.packages") - try: - vendored("requests.packages.urllib3.packages.ordered_dict") - vendored("requests.packages.urllib3.packages.six") - except ImportError: - # Debian already unbundles these from requests. - pass - vendored("requests.packages.urllib3.packages.ssl_match_hostname") - vendored("requests.packages.urllib3.packages.ssl_match_hostname." - "_implementation") - vendored("requests.packages.urllib3.poolmanager") - vendored("requests.packages.urllib3.request") - vendored("requests.packages.urllib3.response") - vendored("requests.packages.urllib3.util") - vendored("requests.packages.urllib3.util.connection") - vendored("requests.packages.urllib3.util.request") - vendored("requests.packages.urllib3.util.response") - vendored("requests.packages.urllib3.util.retry") - vendored("requests.packages.urllib3.util.ssl_") - vendored("requests.packages.urllib3.util.timeout") - vendored("requests.packages.urllib3.util.url") diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/_vendor/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/_vendor/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index ea85f4b..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/_vendor/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/basecommand.py b/classifier/myenv/lib/python3.6/site-packages/pip/basecommand.py deleted file mode 100644 index 54c6706..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/basecommand.py +++ /dev/null @@ -1,337 +0,0 @@ -"""Base Command class, and related routines""" -from __future__ import absolute_import - -import logging -import os -import sys -import optparse -import warnings - -from pip import cmdoptions -from pip.index import PackageFinder -from pip.locations import running_under_virtualenv -from pip.download import PipSession -from pip.exceptions import (BadCommand, InstallationError, UninstallationError, - CommandError, PreviousBuildDirError) - -from pip.compat import logging_dictConfig -from pip.baseparser import ConfigOptionParser, UpdatingDefaultsHelpFormatter -from pip.req import InstallRequirement, parse_requirements -from pip.status_codes import ( - SUCCESS, ERROR, UNKNOWN_ERROR, VIRTUALENV_NOT_FOUND, - PREVIOUS_BUILD_DIR_ERROR, -) -from pip.utils import deprecation, get_prog, normalize_path -from pip.utils.logging import IndentingFormatter -from pip.utils.outdated import pip_version_check - - -__all__ = ['Command'] - - -logger = logging.getLogger(__name__) - - 
-class Command(object):
-    name = None
-    usage = None
-    hidden = False
-    log_streams = ("ext://sys.stdout", "ext://sys.stderr")
-
-    def __init__(self, isolated=False):
-        parser_kw = {
-            'usage': self.usage,
-            'prog': '%s %s' % (get_prog(), self.name),
-            'formatter': UpdatingDefaultsHelpFormatter(),
-            'add_help_option': False,
-            'name': self.name,
-            'description': self.__doc__,
-            'isolated': isolated,
-        }
-
-        self.parser = ConfigOptionParser(**parser_kw)
-
-        # Commands should add options to this option group
-        optgroup_name = '%s Options' % self.name.capitalize()
-        self.cmd_opts = optparse.OptionGroup(self.parser, optgroup_name)
-
-        # Add the general options
-        gen_opts = cmdoptions.make_option_group(
-            cmdoptions.general_group,
-            self.parser,
-        )
-        self.parser.add_option_group(gen_opts)
-
-    def _build_session(self, options, retries=None, timeout=None):
-        session = PipSession(
-            cache=(
-                normalize_path(os.path.join(options.cache_dir, "http"))
-                if options.cache_dir else None
-            ),
-            retries=retries if retries is not None else options.retries,
-            insecure_hosts=options.trusted_hosts,
-        )
-
-        # Handle custom ca-bundles from the user
-        if options.cert:
-            session.verify = options.cert
-
-        # Handle SSL client certificate
-        if options.client_cert:
-            session.cert = options.client_cert
-
-        # Handle timeouts
-        if options.timeout or timeout:
-            session.timeout = (
-                timeout if timeout is not None else options.timeout
-            )
-
-        # Handle configured proxies
-        if options.proxy:
-            session.proxies = {
-                "http": options.proxy,
-                "https": options.proxy,
-            }
-
-        # Determine if we can prompt the user for authentication or not
-        session.auth.prompting = not options.no_input
-
-        return session
-
-    def parse_args(self, args):
-        # factored out for testability
-        return self.parser.parse_args(args)
-
-    def main(self, args):
-        options, args = self.parse_args(args)
-
-        if options.quiet:
-            if options.quiet == 1:
-                level = "WARNING"
-            if options.quiet == 2:
-                level = "ERROR"
-            else:
-                level = "CRITICAL"
-        elif options.verbose:
-            level = "DEBUG"
-        else:
-            level = "INFO"
-
-        # The root logger should match the "console" level *unless* we
-        # specified "--log" to send debug logs to a file.
-        root_level = level
-        if options.log:
-            root_level = "DEBUG"
-
-        logging_dictConfig({
-            "version": 1,
-            "disable_existing_loggers": False,
-            "filters": {
-                "exclude_warnings": {
-                    "()": "pip.utils.logging.MaxLevelFilter",
-                    "level": logging.WARNING,
-                },
-            },
-            "formatters": {
-                "indent": {
-                    "()": IndentingFormatter,
-                    "format": "%(message)s",
-                },
-            },
-            "handlers": {
-                "console": {
-                    "level": level,
-                    "class": "pip.utils.logging.ColorizedStreamHandler",
-                    "stream": self.log_streams[0],
-                    "filters": ["exclude_warnings"],
-                    "formatter": "indent",
-                },
-                "console_errors": {
-                    "level": "WARNING",
-                    "class": "pip.utils.logging.ColorizedStreamHandler",
-                    "stream": self.log_streams[1],
-                    "formatter": "indent",
-                },
-                "user_log": {
-                    "level": "DEBUG",
-                    "class": "pip.utils.logging.BetterRotatingFileHandler",
-                    "filename": options.log or "/dev/null",
-                    "delay": True,
-                    "formatter": "indent",
-                },
-            },
-            "root": {
-                "level": root_level,
-                "handlers": list(filter(None, [
-                    "console",
-                    "console_errors",
-                    "user_log" if options.log else None,
-                ])),
-            },
-            # Disable any logging besides WARNING unless we have DEBUG level
-            # logging enabled. These use both pip._vendor and the bare names
-            # for the case where someone unbundles our libraries.
- "loggers": dict( - ( - name, - { - "level": ( - "WARNING" - if level in ["INFO", "ERROR"] - else "DEBUG" - ), - }, - ) - for name in ["pip._vendor", "distlib", "requests", "urllib3"] - ), - }) - - if sys.version_info[:2] == (2, 6): - warnings.warn( - "Python 2.6 is no longer supported by the Python core team, " - "please upgrade your Python. A future version of pip will " - "drop support for Python 2.6", - deprecation.Python26DeprecationWarning - ) - - # TODO: try to get these passing down from the command? - # without resorting to os.environ to hold these. - - if options.no_input: - os.environ['PIP_NO_INPUT'] = '1' - - if options.exists_action: - os.environ['PIP_EXISTS_ACTION'] = ' '.join(options.exists_action) - - if options.require_venv: - # If a venv is required check if it can really be found - if not running_under_virtualenv(): - logger.critical( - 'Could not find an activated virtualenv (required).' - ) - sys.exit(VIRTUALENV_NOT_FOUND) - - try: - status = self.run(options, args) - # FIXME: all commands should return an exit status - # and when it is done, isinstance is not needed anymore - if isinstance(status, int): - return status - except PreviousBuildDirError as exc: - logger.critical(str(exc)) - logger.debug('Exception information:', exc_info=True) - - return PREVIOUS_BUILD_DIR_ERROR - except (InstallationError, UninstallationError, BadCommand) as exc: - logger.critical(str(exc)) - logger.debug('Exception information:', exc_info=True) - - return ERROR - except CommandError as exc: - logger.critical('ERROR: %s', exc) - logger.debug('Exception information:', exc_info=True) - - return ERROR - except KeyboardInterrupt: - logger.critical('Operation cancelled by user') - logger.debug('Exception information:', exc_info=True) - - return ERROR - except: - logger.critical('Exception:', exc_info=True) - - return UNKNOWN_ERROR - finally: - # Check if we're using the latest version of pip available - if (not options.disable_pip_version_check and not - getattr(options, "no_index", False)): - with self._build_session( - options, - retries=0, - timeout=min(5, options.timeout)) as session: - pip_version_check(session) - - return SUCCESS - - -class RequirementCommand(Command): - - @staticmethod - def populate_requirement_set(requirement_set, args, options, finder, - session, name, wheel_cache): - """ - Marshal cmd line args into a requirement set. 
- """ - for filename in options.constraints: - for req in parse_requirements( - filename, - constraint=True, finder=finder, options=options, - session=session, wheel_cache=wheel_cache): - requirement_set.add_requirement(req) - - for req in args: - requirement_set.add_requirement( - InstallRequirement.from_line( - req, None, isolated=options.isolated_mode, - wheel_cache=wheel_cache - ) - ) - - for req in options.editables: - requirement_set.add_requirement( - InstallRequirement.from_editable( - req, - default_vcs=options.default_vcs, - isolated=options.isolated_mode, - wheel_cache=wheel_cache - ) - ) - - found_req_in_file = False - for filename in options.requirements: - for req in parse_requirements( - filename, - finder=finder, options=options, session=session, - wheel_cache=wheel_cache): - found_req_in_file = True - requirement_set.add_requirement(req) - # If --require-hashes was a line in a requirements file, tell - # RequirementSet about it: - requirement_set.require_hashes = options.require_hashes - - if not (args or options.editables or found_req_in_file): - opts = {'name': name} - if options.find_links: - msg = ('You must give at least one requirement to ' - '%(name)s (maybe you meant "pip %(name)s ' - '%(links)s"?)' % - dict(opts, links=' '.join(options.find_links))) - else: - msg = ('You must give at least one requirement ' - 'to %(name)s (see "pip help %(name)s")' % opts) - logger.warning(msg) - - def _build_package_finder(self, options, session, - platform=None, python_versions=None, - abi=None, implementation=None): - """ - Create a package finder appropriate to this requirement command. - """ - index_urls = [options.index_url] + options.extra_index_urls - if options.no_index: - logger.debug('Ignoring indexes: %s', ','.join(index_urls)) - index_urls = [] - - return PackageFinder( - find_links=options.find_links, - format_control=options.format_control, - index_urls=index_urls, - trusted_hosts=options.trusted_hosts, - allow_all_prereleases=options.pre, - process_dependency_links=options.process_dependency_links, - session=session, - platform=platform, - versions=python_versions, - abi=abi, - implementation=implementation, - ) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/baseparser.py b/classifier/myenv/lib/python3.6/site-packages/pip/baseparser.py deleted file mode 100644 index 2dd4533..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/baseparser.py +++ /dev/null @@ -1,293 +0,0 @@ -"""Base option parser setup""" -from __future__ import absolute_import - -import sys -import optparse -import os -import re -import textwrap -from distutils.util import strtobool - -from pip._vendor.six import string_types -from pip._vendor.six.moves import configparser -from pip.locations import ( - legacy_config_file, config_basename, running_under_virtualenv, - site_config_files -) -from pip.utils import appdirs, get_terminal_size - - -_environ_prefix_re = re.compile(r"^PIP_", re.I) - - -class PrettyHelpFormatter(optparse.IndentedHelpFormatter): - """A prettier/less verbose help formatter for optparse.""" - - def __init__(self, *args, **kwargs): - # help position must be aligned with __init__.parseopts.description - kwargs['max_help_position'] = 30 - kwargs['indent_increment'] = 1 - kwargs['width'] = get_terminal_size()[0] - 2 - optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs) - - def format_option_strings(self, option): - return self._format_option_strings(option, ' <%s>', ', ') - - def _format_option_strings(self, option, mvarfmt=' <%s>', optsep=', '): 
- """ - Return a comma-separated list of option strings and metavars. - - :param option: tuple of (short opt, long opt), e.g: ('-f', '--format') - :param mvarfmt: metavar format string - evaluated as mvarfmt % metavar - :param optsep: separator - """ - opts = [] - - if option._short_opts: - opts.append(option._short_opts[0]) - if option._long_opts: - opts.append(option._long_opts[0]) - if len(opts) > 1: - opts.insert(1, optsep) - - if option.takes_value(): - metavar = option.metavar or option.dest.lower() - opts.append(mvarfmt % metavar.lower()) - - return ''.join(opts) - - def format_heading(self, heading): - if heading == 'Options': - return '' - return heading + ':\n' - - def format_usage(self, usage): - """ - Ensure there is only one newline between usage and the first heading - if there is no description. - """ - msg = '\nUsage: %s\n' % self.indent_lines(textwrap.dedent(usage), " ") - return msg - - def format_description(self, description): - # leave full control over description to us - if description: - if hasattr(self.parser, 'main'): - label = 'Commands' - else: - label = 'Description' - # some doc strings have initial newlines, some don't - description = description.lstrip('\n') - # some doc strings have final newlines and spaces, some don't - description = description.rstrip() - # dedent, then reindent - description = self.indent_lines(textwrap.dedent(description), " ") - description = '%s:\n%s\n' % (label, description) - return description - else: - return '' - - def format_epilog(self, epilog): - # leave full control over epilog to us - if epilog: - return epilog - else: - return '' - - def indent_lines(self, text, indent): - new_lines = [indent + line for line in text.split('\n')] - return "\n".join(new_lines) - - -class UpdatingDefaultsHelpFormatter(PrettyHelpFormatter): - """Custom help formatter for use in ConfigOptionParser. - - This is updates the defaults before expanding them, allowing - them to show up correctly in the help listing. 
- """ - - def expand_default(self, option): - if self.parser is not None: - self.parser._update_defaults(self.parser.defaults) - return optparse.IndentedHelpFormatter.expand_default(self, option) - - -class CustomOptionParser(optparse.OptionParser): - - def insert_option_group(self, idx, *args, **kwargs): - """Insert an OptionGroup at a given position.""" - group = self.add_option_group(*args, **kwargs) - - self.option_groups.pop() - self.option_groups.insert(idx, group) - - return group - - @property - def option_list_all(self): - """Get a list of all options, including those in option groups.""" - res = self.option_list[:] - for i in self.option_groups: - res.extend(i.option_list) - - return res - - -class ConfigOptionParser(CustomOptionParser): - """Custom option parser which updates its defaults by checking the - configuration files and environmental variables""" - - isolated = False - - def __init__(self, *args, **kwargs): - self.config = configparser.RawConfigParser() - self.name = kwargs.pop('name') - self.isolated = kwargs.pop("isolated", False) - self.files = self.get_config_files() - if self.files: - self.config.read(self.files) - assert self.name - optparse.OptionParser.__init__(self, *args, **kwargs) - - def get_config_files(self): - # the files returned by this method will be parsed in order with the - # first files listed being overridden by later files in standard - # ConfigParser fashion - config_file = os.environ.get('PIP_CONFIG_FILE', False) - if config_file == os.devnull: - return [] - - # at the base we have any site-wide configuration - files = list(site_config_files) - - # per-user configuration next - if not self.isolated: - if config_file and os.path.exists(config_file): - files.append(config_file) - else: - # This is the legacy config file, we consider it to be a lower - # priority than the new file location. - files.append(legacy_config_file) - - # This is the new config file, we consider it to be a higher - # priority than the legacy file. - files.append( - os.path.join( - appdirs.user_config_dir("pip"), - config_basename, - ) - ) - - # finally virtualenv configuration first trumping others - if running_under_virtualenv(): - venv_config_file = os.path.join( - sys.prefix, - config_basename, - ) - if os.path.exists(venv_config_file): - files.append(venv_config_file) - - return files - - def check_default(self, option, key, val): - try: - return option.check_value(key, val) - except optparse.OptionValueError as exc: - print("An error occurred during configuration: %s" % exc) - sys.exit(3) - - def _update_defaults(self, defaults): - """Updates the given defaults with values from the config files and - the environ. Does a little special handling for certain types of - options (lists).""" - # Then go and look for the other sources of configuration: - config = {} - # 1. config files - for section in ('global', self.name): - config.update( - self.normalize_keys(self.get_config_section(section)) - ) - # 2. environmental variables - if not self.isolated: - config.update(self.normalize_keys(self.get_environ_vars())) - # Accumulate complex default state. - self.values = optparse.Values(self.defaults) - late_eval = set() - # Then set the options with those values - for key, val in config.items(): - # ignore empty values - if not val: - continue - - option = self.get_option(key) - # Ignore options not present in this parser. E.g. non-globals put - # in [global] by users that want them to apply to all applicable - # commands. 
- if option is None: - continue - - if option.action in ('store_true', 'store_false', 'count'): - val = strtobool(val) - elif option.action == 'append': - val = val.split() - val = [self.check_default(option, key, v) for v in val] - elif option.action == 'callback': - late_eval.add(option.dest) - opt_str = option.get_opt_string() - val = option.convert_value(opt_str, val) - # From take_action - args = option.callback_args or () - kwargs = option.callback_kwargs or {} - option.callback(option, opt_str, val, self, *args, **kwargs) - else: - val = self.check_default(option, key, val) - - defaults[option.dest] = val - - for key in late_eval: - defaults[key] = getattr(self.values, key) - self.values = None - return defaults - - def normalize_keys(self, items): - """Return a config dictionary with normalized keys regardless of - whether the keys were specified in environment variables or in config - files""" - normalized = {} - for key, val in items: - key = key.replace('_', '-') - if not key.startswith('--'): - key = '--%s' % key # only prefer long opts - normalized[key] = val - return normalized - - def get_config_section(self, name): - """Get a section of a configuration""" - if self.config.has_section(name): - return self.config.items(name) - return [] - - def get_environ_vars(self): - """Returns a generator with all environmental vars with prefix PIP_""" - for key, val in os.environ.items(): - if _environ_prefix_re.search(key): - yield (_environ_prefix_re.sub("", key).lower(), val) - - def get_default_values(self): - """Overriding to make updating the defaults after instantiation of - the option parser possible, _update_defaults() does the dirty work.""" - if not self.process_default_values: - # Old, pre-Optik 1.5 behaviour. - return optparse.Values(self.defaults) - - defaults = self._update_defaults(self.defaults.copy()) # ours - for option in self._get_all_options(): - default = defaults.get(option.dest) - if isinstance(default, string_types): - opt_str = option.get_opt_string() - defaults[option.dest] = option.check_value(opt_str, default) - return optparse.Values(defaults) - - def error(self, msg): - self.print_usage(sys.stderr) - self.exit(2, "%s\n" % msg) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/cmdoptions.py b/classifier/myenv/lib/python3.6/site-packages/pip/cmdoptions.py deleted file mode 100644 index f75c093..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/cmdoptions.py +++ /dev/null @@ -1,633 +0,0 @@ -""" -shared options and groups - -The principle here is to define options once, but *not* instantiate them -globally. One reason being that options with action='append' can carry state -between parses. pip parses general options twice internally, and shouldn't -pass on state. To be consistent, all options will follow this design. 
- -""" -from __future__ import absolute_import - -from functools import partial -from optparse import OptionGroup, SUPPRESS_HELP, Option -import warnings - -from pip.index import ( - FormatControl, fmt_ctl_handle_mutual_exclude, fmt_ctl_no_binary, - fmt_ctl_no_use_wheel) -from pip.models import PyPI -from pip.locations import USER_CACHE_DIR, src_prefix -from pip.utils.hashes import STRONG_HASHES - - -def make_option_group(group, parser): - """ - Return an OptionGroup object - group -- assumed to be dict with 'name' and 'options' keys - parser -- an optparse Parser - """ - option_group = OptionGroup(parser, group['name']) - for option in group['options']: - option_group.add_option(option()) - return option_group - - -def resolve_wheel_no_use_binary(options): - if not options.use_wheel: - control = options.format_control - fmt_ctl_no_use_wheel(control) - - -def check_install_build_global(options, check_options=None): - """Disable wheels if per-setup.py call options are set. - - :param options: The OptionParser options to update. - :param check_options: The options to check, if not supplied defaults to - options. - """ - if check_options is None: - check_options = options - - def getname(n): - return getattr(check_options, n, None) - names = ["build_options", "global_options", "install_options"] - if any(map(getname, names)): - control = options.format_control - fmt_ctl_no_binary(control) - warnings.warn( - 'Disabling all use of wheels due to the use of --build-options ' - '/ --global-options / --install-options.', stacklevel=2) - - -########### -# options # -########### - -help_ = partial( - Option, - '-h', '--help', - dest='help', - action='help', - help='Show help.') - -isolated_mode = partial( - Option, - "--isolated", - dest="isolated_mode", - action="store_true", - default=False, - help=( - "Run pip in an isolated mode, ignoring environment variables and user " - "configuration." - ), -) - -require_virtualenv = partial( - Option, - # Run only if inside a virtualenv, bail if not. - '--require-virtualenv', '--require-venv', - dest='require_venv', - action='store_true', - default=False, - help=SUPPRESS_HELP) - -verbose = partial( - Option, - '-v', '--verbose', - dest='verbose', - action='count', - default=0, - help='Give more output. Option is additive, and can be used up to 3 times.' -) - -version = partial( - Option, - '-V', '--version', - dest='version', - action='store_true', - help='Show version and exit.') - -quiet = partial( - Option, - '-q', '--quiet', - dest='quiet', - action='count', - default=0, - help=('Give less output. Option is additive, and can be used up to 3' - ' times (corresponding to WARNING, ERROR, and CRITICAL logging' - ' levels).') -) - -log = partial( - Option, - "--log", "--log-file", "--local-log", - dest="log", - metavar="path", - help="Path to a verbose appending log." 
-) - -no_input = partial( - Option, - # Don't ask for input - '--no-input', - dest='no_input', - action='store_true', - default=False, - help=SUPPRESS_HELP) - -proxy = partial( - Option, - '--proxy', - dest='proxy', - type='str', - default='', - help="Specify a proxy in the form [user:passwd@]proxy.server:port.") - -retries = partial( - Option, - '--retries', - dest='retries', - type='int', - default=5, - help="Maximum number of retries each connection should attempt " - "(default %default times).") - -timeout = partial( - Option, - '--timeout', '--default-timeout', - metavar='sec', - dest='timeout', - type='float', - default=15, - help='Set the socket timeout (default %default seconds).') - -default_vcs = partial( - Option, - # The default version control system for editables, e.g. 'svn' - '--default-vcs', - dest='default_vcs', - type='str', - default='', - help=SUPPRESS_HELP) - -skip_requirements_regex = partial( - Option, - # A regex to be used to skip requirements - '--skip-requirements-regex', - dest='skip_requirements_regex', - type='str', - default='', - help=SUPPRESS_HELP) - - -def exists_action(): - return Option( - # Option when path already exist - '--exists-action', - dest='exists_action', - type='choice', - choices=['s', 'i', 'w', 'b', 'a'], - default=[], - action='append', - metavar='action', - help="Default action when a path already exists: " - "(s)witch, (i)gnore, (w)ipe, (b)ackup, (a)bort.") - - -cert = partial( - Option, - '--cert', - dest='cert', - type='str', - metavar='path', - help="Path to alternate CA bundle.") - -client_cert = partial( - Option, - '--client-cert', - dest='client_cert', - type='str', - default=None, - metavar='path', - help="Path to SSL client certificate, a single file containing the " - "private key and the certificate in PEM format.") - -index_url = partial( - Option, - '-i', '--index-url', '--pypi-url', - dest='index_url', - metavar='URL', - default=PyPI.simple_url, - help="Base URL of Python Package Index (default %default). " - "This should point to a repository compliant with PEP 503 " - "(the simple repository API) or a local directory laid out " - "in the same format.") - - -def extra_index_url(): - return Option( - '--extra-index-url', - dest='extra_index_urls', - metavar='URL', - action='append', - default=[], - help="Extra URLs of package indexes to use in addition to " - "--index-url. Should follow the same rules as " - "--index-url." - ) - - -no_index = partial( - Option, - '--no-index', - dest='no_index', - action='store_true', - default=False, - help='Ignore package index (only looking at --find-links URLs instead).') - - -def find_links(): - return Option( - '-f', '--find-links', - dest='find_links', - action='append', - default=[], - metavar='url', - help="If a url or path to an html file, then parse for links to " - "archives. 
If a local path or file:// url that's a directory, " - "then look for archives in the directory listing.") - - -def allow_external(): - return Option( - "--allow-external", - dest="allow_external", - action="append", - default=[], - metavar="PACKAGE", - help=SUPPRESS_HELP, - ) - - -allow_all_external = partial( - Option, - "--allow-all-external", - dest="allow_all_external", - action="store_true", - default=False, - help=SUPPRESS_HELP, -) - - -def trusted_host(): - return Option( - "--trusted-host", - dest="trusted_hosts", - action="append", - metavar="HOSTNAME", - default=[], - help="Mark this host as trusted, even though it does not have valid " - "or any HTTPS.", - ) - - -# Remove after 7.0 -no_allow_external = partial( - Option, - "--no-allow-external", - dest="allow_all_external", - action="store_false", - default=False, - help=SUPPRESS_HELP, -) - - -# Remove --allow-insecure after 7.0 -def allow_unsafe(): - return Option( - "--allow-unverified", "--allow-insecure", - dest="allow_unverified", - action="append", - default=[], - metavar="PACKAGE", - help=SUPPRESS_HELP, - ) - -# Remove after 7.0 -no_allow_unsafe = partial( - Option, - "--no-allow-insecure", - dest="allow_all_insecure", - action="store_false", - default=False, - help=SUPPRESS_HELP -) - -# Remove after 1.5 -process_dependency_links = partial( - Option, - "--process-dependency-links", - dest="process_dependency_links", - action="store_true", - default=False, - help="Enable the processing of dependency links.", -) - - -def constraints(): - return Option( - '-c', '--constraint', - dest='constraints', - action='append', - default=[], - metavar='file', - help='Constrain versions using the given constraints file. ' - 'This option can be used multiple times.') - - -def requirements(): - return Option( - '-r', '--requirement', - dest='requirements', - action='append', - default=[], - metavar='file', - help='Install from the given requirements file. ' - 'This option can be used multiple times.') - - -def editable(): - return Option( - '-e', '--editable', - dest='editables', - action='append', - default=[], - metavar='path/url', - help=('Install a project in editable mode (i.e. setuptools ' - '"develop mode") from a local project path or a VCS url.'), - ) - -src = partial( - Option, - '--src', '--source', '--source-dir', '--source-directory', - dest='src_dir', - metavar='dir', - default=src_prefix, - help='Directory to check out editable projects into. ' - 'The default in a virtualenv is "/src". ' - 'The default for global installs is "/src".' -) - -# XXX: deprecated, remove in 9.0 -use_wheel = partial( - Option, - '--use-wheel', - dest='use_wheel', - action='store_true', - default=True, - help=SUPPRESS_HELP, -) - -# XXX: deprecated, remove in 9.0 -no_use_wheel = partial( - Option, - '--no-use-wheel', - dest='use_wheel', - action='store_false', - default=True, - help=('Do not Find and prefer wheel archives when searching indexes and ' - 'find-links locations. 
DEPRECATED in favour of --no-binary.'), -) - - -def _get_format_control(values, option): - """Get a format_control object.""" - return getattr(values, option.dest) - - -def _handle_no_binary(option, opt_str, value, parser): - existing = getattr(parser.values, option.dest) - fmt_ctl_handle_mutual_exclude( - value, existing.no_binary, existing.only_binary) - - -def _handle_only_binary(option, opt_str, value, parser): - existing = getattr(parser.values, option.dest) - fmt_ctl_handle_mutual_exclude( - value, existing.only_binary, existing.no_binary) - - -def no_binary(): - return Option( - "--no-binary", dest="format_control", action="callback", - callback=_handle_no_binary, type="str", - default=FormatControl(set(), set()), - help="Do not use binary packages. Can be supplied multiple times, and " - "each time adds to the existing value. Accepts either :all: to " - "disable all binary packages, :none: to empty the set, or one or " - "more package names with commas between them. Note that some " - "packages are tricky to compile and may fail to install when " - "this option is used on them.") - - -def only_binary(): - return Option( - "--only-binary", dest="format_control", action="callback", - callback=_handle_only_binary, type="str", - default=FormatControl(set(), set()), - help="Do not use source packages. Can be supplied multiple times, and " - "each time adds to the existing value. Accepts either :all: to " - "disable all source packages, :none: to empty the set, or one or " - "more package names with commas between them. Packages without " - "binary distributions will fail to install when this option is " - "used on them.") - - -cache_dir = partial( - Option, - "--cache-dir", - dest="cache_dir", - default=USER_CACHE_DIR, - metavar="dir", - help="Store the cache data in ." -) - -no_cache = partial( - Option, - "--no-cache-dir", - dest="cache_dir", - action="store_false", - help="Disable the cache.", -) - -no_deps = partial( - Option, - '--no-deps', '--no-dependencies', - dest='ignore_dependencies', - action='store_true', - default=False, - help="Don't install package dependencies.") - -build_dir = partial( - Option, - '-b', '--build', '--build-dir', '--build-directory', - dest='build_dir', - metavar='dir', - help='Directory to unpack packages into and build in.' -) - -ignore_requires_python = partial( - Option, - '--ignore-requires-python', - dest='ignore_requires_python', - action='store_true', - help='Ignore the Requires-Python information.') - -install_options = partial( - Option, - '--install-option', - dest='install_options', - action='append', - metavar='options', - help="Extra arguments to be supplied to the setup.py install " - "command (use like --install-option=\"--install-scripts=/usr/local/" - "bin\"). Use multiple --install-option options to pass multiple " - "options to setup.py install. If you are using an option with a " - "directory path, be sure to use absolute path.") - -global_options = partial( - Option, - '--global-option', - dest='global_options', - action='append', - metavar='options', - help="Extra global options to be supplied to the setup.py " - "call before the install command.") - -no_clean = partial( - Option, - '--no-clean', - action='store_true', - default=False, - help="Don't clean up build directories.") - -pre = partial( - Option, - '--pre', - action='store_true', - default=False, - help="Include pre-release and development versions. 
By default, " - "pip only finds stable versions.") - -disable_pip_version_check = partial( - Option, - "--disable-pip-version-check", - dest="disable_pip_version_check", - action="store_true", - default=True, - help="Don't periodically check PyPI to determine whether a new version " - "of pip is available for download. Implied with --no-index.") - -# Deprecated, Remove later -always_unzip = partial( - Option, - '-Z', '--always-unzip', - dest='always_unzip', - action='store_true', - help=SUPPRESS_HELP, -) - - -def _merge_hash(option, opt_str, value, parser): - """Given a value spelled "algo:digest", append the digest to a list - pointed to in a dict by the algo name.""" - if not parser.values.hashes: - parser.values.hashes = {} - try: - algo, digest = value.split(':', 1) - except ValueError: - parser.error('Arguments to %s must be a hash name ' - 'followed by a value, like --hash=sha256:abcde...' % - opt_str) - if algo not in STRONG_HASHES: - parser.error('Allowed hash algorithms for %s are %s.' % - (opt_str, ', '.join(STRONG_HASHES))) - parser.values.hashes.setdefault(algo, []).append(digest) - - -hash = partial( - Option, - '--hash', - # Hash values eventually end up in InstallRequirement.hashes due to - # __dict__ copying in process_line(). - dest='hashes', - action='callback', - callback=_merge_hash, - type='string', - help="Verify that the package's archive matches this " - 'hash before installing. Example: --hash=sha256:abcdef...') - - -require_hashes = partial( - Option, - '--require-hashes', - dest='require_hashes', - action='store_true', - default=False, - help='Require a hash to check each requirement against, for ' - 'repeatable installs. This option is implied when any package in a ' - 'requirements file has a --hash option.') - - -########## -# groups # -########## - -general_group = { - 'name': 'General Options', - 'options': [ - help_, - isolated_mode, - require_virtualenv, - verbose, - version, - quiet, - log, - no_input, - proxy, - retries, - timeout, - default_vcs, - skip_requirements_regex, - exists_action, - trusted_host, - cert, - client_cert, - cache_dir, - no_cache, - disable_pip_version_check, - ] -} - -non_deprecated_index_group = { - 'name': 'Package Index Options', - 'options': [ - index_url, - extra_index_url, - no_index, - find_links, - process_dependency_links, - ] -} - -index_group = { - 'name': 'Package Index Options (including deprecated options)', - 'options': non_deprecated_index_group['options'] + [ - allow_external, - allow_all_external, - no_allow_external, - allow_unsafe, - no_allow_unsafe, - ] -} diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__init__.py b/classifier/myenv/lib/python3.6/site-packages/pip/commands/__init__.py deleted file mode 100644 index 62c64eb..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__init__.py +++ /dev/null @@ -1,86 +0,0 @@ -""" -Package containing all pip commands -""" -from __future__ import absolute_import - -from pip.commands.completion import CompletionCommand -from pip.commands.download import DownloadCommand -from pip.commands.freeze import FreezeCommand -from pip.commands.hash import HashCommand -from pip.commands.help import HelpCommand -from pip.commands.list import ListCommand -from pip.commands.check import CheckCommand -from pip.commands.search import SearchCommand -from pip.commands.show import ShowCommand -from pip.commands.install import InstallCommand -from pip.commands.uninstall import UninstallCommand -from pip.commands.wheel import WheelCommand - - 
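A minimal sketch, not part of the deleted file: the hunk that follows wires every command class into a plain name-to-class registry, and get_similar_commands later in the same file turns that registry into "maybe you meant" suggestions with difflib. A runnable illustration of the pattern, using a stand-in table instead of pip's Command classes:

    from difflib import get_close_matches

    # Stand-in registry; pip's real commands_dict maps names to Command classes.
    commands_dict = {'install': None, 'uninstall': None, 'freeze': None, 'list': None}

    def get_similar(name):
        # Closest fuzzy matches first; an empty list means nothing was close enough.
        matches = get_close_matches(name.lower(), commands_dict.keys())
        return matches[0] if matches else None

    print(get_similar('instal'))  # -> 'install'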
-commands_dict = { - CompletionCommand.name: CompletionCommand, - FreezeCommand.name: FreezeCommand, - HashCommand.name: HashCommand, - HelpCommand.name: HelpCommand, - SearchCommand.name: SearchCommand, - ShowCommand.name: ShowCommand, - InstallCommand.name: InstallCommand, - UninstallCommand.name: UninstallCommand, - DownloadCommand.name: DownloadCommand, - ListCommand.name: ListCommand, - CheckCommand.name: CheckCommand, - WheelCommand.name: WheelCommand, -} - - -commands_order = [ - InstallCommand, - DownloadCommand, - UninstallCommand, - FreezeCommand, - ListCommand, - ShowCommand, - CheckCommand, - SearchCommand, - WheelCommand, - HashCommand, - CompletionCommand, - HelpCommand, -] - - -def get_summaries(ordered=True): - """Yields sorted (command name, command summary) tuples.""" - - if ordered: - cmditems = _sort_commands(commands_dict, commands_order) - else: - cmditems = commands_dict.items() - - for name, command_class in cmditems: - yield (name, command_class.summary) - - -def get_similar_commands(name): - """Command name auto-correct.""" - from difflib import get_close_matches - - name = name.lower() - - close_commands = get_close_matches(name, commands_dict.keys()) - - if close_commands: - return close_commands[0] - else: - return False - - -def _sort_commands(cmddict, order): - def keyfn(key): - try: - return order.index(key[1]) - except ValueError: - # unordered items should come last - return 0xff - - return sorted(cmddict.items(), key=keyfn) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index 8eaa7ce..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/check.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/check.cpython-36.pyc deleted file mode 100644 index 9512f49..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/check.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/completion.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/completion.cpython-36.pyc deleted file mode 100644 index 2568ad7..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/completion.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/download.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/download.cpython-36.pyc deleted file mode 100644 index 732fcef..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/download.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/freeze.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/freeze.cpython-36.pyc deleted file mode 100644 index 66972f7..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/freeze.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/hash.cpython-36.pyc 
b/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/hash.cpython-36.pyc deleted file mode 100644 index 23212a8..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/hash.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/help.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/help.cpython-36.pyc deleted file mode 100644 index f7aabac..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/help.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/install.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/install.cpython-36.pyc deleted file mode 100644 index 040b4d9..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/install.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/list.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/list.cpython-36.pyc deleted file mode 100644 index 9bd3cfe..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/list.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/search.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/search.cpython-36.pyc deleted file mode 100644 index 356dd19..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/search.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/show.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/show.cpython-36.pyc deleted file mode 100644 index 506b694..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/show.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/uninstall.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/uninstall.cpython-36.pyc deleted file mode 100644 index c52f767..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/uninstall.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/wheel.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/wheel.cpython-36.pyc deleted file mode 100644 index f602c18..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/commands/__pycache__/wheel.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/check.py b/classifier/myenv/lib/python3.6/site-packages/pip/commands/check.py deleted file mode 100644 index 70458ad..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/commands/check.py +++ /dev/null @@ -1,39 +0,0 @@ -import logging - -from pip.basecommand import Command -from pip.operations.check import check_requirements -from pip.utils import get_installed_distributions - - -logger = logging.getLogger(__name__) - - -class CheckCommand(Command): - """Verify installed packages have compatible dependencies.""" - name = 'check' - usage = """ - %prog [options]""" - summary = 'Verify 
installed packages have compatible dependencies.' - - def run(self, options, args): - dists = get_installed_distributions(local_only=False, skip=()) - missing_reqs_dict, incompatible_reqs_dict = check_requirements(dists) - - for dist in dists: - key = '%s==%s' % (dist.project_name, dist.version) - - for requirement in missing_reqs_dict.get(key, []): - logger.info( - "%s %s requires %s, which is not installed.", - dist.project_name, dist.version, requirement.project_name) - - for requirement, actual in incompatible_reqs_dict.get(key, []): - logger.info( - "%s %s has requirement %s, but you have %s %s.", - dist.project_name, dist.version, requirement, - actual.project_name, actual.version) - - if missing_reqs_dict or incompatible_reqs_dict: - return 1 - else: - logger.info("No broken requirements found.") diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/completion.py b/classifier/myenv/lib/python3.6/site-packages/pip/commands/completion.py deleted file mode 100644 index 66e41a6..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/commands/completion.py +++ /dev/null @@ -1,81 +0,0 @@ -from __future__ import absolute_import - -import sys -from pip.basecommand import Command - -BASE_COMPLETION = """ -# pip %(shell)s completion start%(script)s# pip %(shell)s completion end -""" - -COMPLETION_SCRIPTS = { - 'bash': """ -_pip_completion() -{ - COMPREPLY=( $( COMP_WORDS="${COMP_WORDS[*]}" \\ - COMP_CWORD=$COMP_CWORD \\ - PIP_AUTO_COMPLETE=1 $1 ) ) -} -complete -o default -F _pip_completion pip -""", 'zsh': """ -function _pip_completion { - local words cword - read -Ac words - read -cn cword - reply=( $( COMP_WORDS="$words[*]" \\ - COMP_CWORD=$(( cword-1 )) \\ - PIP_AUTO_COMPLETE=1 $words[1] ) ) -} -compctl -K _pip_completion pip -""", 'fish': """ -function __fish_complete_pip - set -lx COMP_WORDS (commandline -o) "" - set -lx COMP_CWORD (math (contains -i -- (commandline -t) $COMP_WORDS)-1) - set -lx PIP_AUTO_COMPLETE 1 - string split \ -- (eval $COMP_WORDS[1]) -end -complete -fa "(__fish_complete_pip)" -c pip -"""} - - -class CompletionCommand(Command): - """A helper command to be used for command completion.""" - name = 'completion' - summary = 'A helper command used for command completion.' 
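A minimal sketch, not part of the deleted file: each shell snippet above follows the same protocol, re-running the program with PIP_AUTO_COMPLETE=1 and the in-progress command line exported as COMP_WORDS/COMP_CWORD, then splicing whatever it prints into the shell's completion list. A Python-side handler under those assumptions (the word list is illustrative; pip's real handler also completes option flags):

    import os
    import sys

    SUBCOMMANDS = ['download', 'freeze', 'install', 'list', 'uninstall']  # illustrative

    def autocomplete():
        # Only act when invoked by the shell completion machinery above.
        if os.environ.get('PIP_AUTO_COMPLETE') != '1':
            return
        cwords = os.environ.get('COMP_WORDS', '').split()[1:]  # drop the program name
        cword = int(os.environ.get('COMP_CWORD', 1))           # 1-based current-word index
        try:
            current = cwords[cword - 1]
        except IndexError:
            current = ''
        print(' '.join(w for w in SUBCOMMANDS if w.startswith(current)))
        sys.exit(1)  # stop normal argument parsing once completions are emitted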
- - def __init__(self, *args, **kw): - super(CompletionCommand, self).__init__(*args, **kw) - - cmd_opts = self.cmd_opts - - cmd_opts.add_option( - '--bash', '-b', - action='store_const', - const='bash', - dest='shell', - help='Emit completion code for bash') - cmd_opts.add_option( - '--zsh', '-z', - action='store_const', - const='zsh', - dest='shell', - help='Emit completion code for zsh') - cmd_opts.add_option( - '--fish', '-f', - action='store_const', - const='fish', - dest='shell', - help='Emit completion code for fish') - - self.parser.insert_option_group(0, cmd_opts) - - def run(self, options, args): - """Prints the completion code of the given shell""" - shells = COMPLETION_SCRIPTS.keys() - shell_options = ['--' + shell for shell in sorted(shells)] - if options.shell in shells: - script = COMPLETION_SCRIPTS.get(options.shell, '') - print(BASE_COMPLETION % {'script': script, 'shell': options.shell}) - else: - sys.stderr.write( - 'ERROR: You must pass %s\n' % ' or '.join(shell_options) - ) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/download.py b/classifier/myenv/lib/python3.6/site-packages/pip/commands/download.py deleted file mode 100644 index 4bc0640..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/commands/download.py +++ /dev/null @@ -1,212 +0,0 @@ -from __future__ import absolute_import - -import logging -import os - -from pip.exceptions import CommandError -from pip.index import FormatControl -from pip.req import RequirementSet -from pip.basecommand import RequirementCommand -from pip import cmdoptions -from pip.utils import ensure_dir, normalize_path -from pip.utils.build import BuildDirectory -from pip.utils.filesystem import check_path_owner - - -logger = logging.getLogger(__name__) - - -class DownloadCommand(RequirementCommand): - """ - Download packages from: - - - PyPI (and other indexes) using requirement specifiers. - - VCS project urls. - - Local project directories. - - Local or remote source archives. - - pip also supports downloading from "requirements files", which provide - an easy way to specify a whole environment to be downloaded. - """ - name = 'download' - - usage = """ - %prog [options] [package-index-options] ... - %prog [options] -r [package-index-options] ... - %prog [options] [-e] ... - %prog [options] [-e] ... - %prog [options] ...""" - - summary = 'Download packages.' - - def __init__(self, *args, **kw): - super(DownloadCommand, self).__init__(*args, **kw) - - cmd_opts = self.cmd_opts - - cmd_opts.add_option(cmdoptions.constraints()) - cmd_opts.add_option(cmdoptions.editable()) - cmd_opts.add_option(cmdoptions.requirements()) - cmd_opts.add_option(cmdoptions.build_dir()) - cmd_opts.add_option(cmdoptions.no_deps()) - cmd_opts.add_option(cmdoptions.global_options()) - cmd_opts.add_option(cmdoptions.no_binary()) - cmd_opts.add_option(cmdoptions.only_binary()) - cmd_opts.add_option(cmdoptions.src()) - cmd_opts.add_option(cmdoptions.pre()) - cmd_opts.add_option(cmdoptions.no_clean()) - cmd_opts.add_option(cmdoptions.require_hashes()) - - cmd_opts.add_option( - '-d', '--dest', '--destination-dir', '--destination-directory', - dest='download_dir', - metavar='dir', - default=os.curdir, - help=("Download packages into ."), - ) - - cmd_opts.add_option( - '--platform', - dest='platform', - metavar='platform', - default=None, - help=("Only download wheels compatible with . 
" - "Defaults to the platform of the running system."), - ) - - cmd_opts.add_option( - '--python-version', - dest='python_version', - metavar='python_version', - default=None, - help=("Only download wheels compatible with Python " - "interpreter version . If not specified, then the " - "current system interpreter minor version is used. A major " - "version (e.g. '2') can be specified to match all " - "minor revs of that major version. A minor version " - "(e.g. '34') can also be specified."), - ) - - cmd_opts.add_option( - '--implementation', - dest='implementation', - metavar='implementation', - default=None, - help=("Only download wheels compatible with Python " - "implementation , e.g. 'pp', 'jy', 'cp', " - " or 'ip'. If not specified, then the current " - "interpreter implementation is used. Use 'py' to force " - "implementation-agnostic wheels."), - ) - - cmd_opts.add_option( - '--abi', - dest='abi', - metavar='abi', - default=None, - help=("Only download wheels compatible with Python " - "abi , e.g. 'pypy_41'. If not specified, then the " - "current interpreter abi tag is used. Generally " - "you will need to specify --implementation, " - "--platform, and --python-version when using " - "this option."), - ) - - index_opts = cmdoptions.make_option_group( - cmdoptions.non_deprecated_index_group, - self.parser, - ) - - self.parser.insert_option_group(0, index_opts) - self.parser.insert_option_group(0, cmd_opts) - - def run(self, options, args): - options.ignore_installed = True - - if options.python_version: - python_versions = [options.python_version] - else: - python_versions = None - - dist_restriction_set = any([ - options.python_version, - options.platform, - options.abi, - options.implementation, - ]) - binary_only = FormatControl(set(), set([':all:'])) - if dist_restriction_set and options.format_control != binary_only: - raise CommandError( - "--only-binary=:all: must be set and --no-binary must not " - "be set (or must be set to :none:) when restricting platform " - "and interpreter constraints using --python-version, " - "--platform, --abi, or --implementation." - ) - - options.src_dir = os.path.abspath(options.src_dir) - options.download_dir = normalize_path(options.download_dir) - - ensure_dir(options.download_dir) - - with self._build_session(options) as session: - finder = self._build_package_finder( - options=options, - session=session, - platform=options.platform, - python_versions=python_versions, - abi=options.abi, - implementation=options.implementation, - ) - build_delete = (not (options.no_clean or options.build_dir)) - if options.cache_dir and not check_path_owner(options.cache_dir): - logger.warning( - "The directory '%s' or its parent directory is not owned " - "by the current user and caching wheels has been " - "disabled. check the permissions and owner of that " - "directory. 
If executing pip with sudo, you may want " - "sudo's -H flag.", - options.cache_dir, - ) - options.cache_dir = None - - with BuildDirectory(options.build_dir, - delete=build_delete) as build_dir: - - requirement_set = RequirementSet( - build_dir=build_dir, - src_dir=options.src_dir, - download_dir=options.download_dir, - ignore_installed=True, - ignore_dependencies=options.ignore_dependencies, - session=session, - isolated=options.isolated_mode, - require_hashes=options.require_hashes - ) - self.populate_requirement_set( - requirement_set, - args, - options, - finder, - session, - self.name, - None - ) - - if not requirement_set.has_requirements: - return - - requirement_set.prepare_files(finder) - - downloaded = ' '.join([ - req.name for req in requirement_set.successfully_downloaded - ]) - if downloaded: - logger.info( - 'Successfully downloaded %s', downloaded - ) - - # Clean up - if not options.no_clean: - requirement_set.cleanup_files() - - return requirement_set diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/freeze.py b/classifier/myenv/lib/python3.6/site-packages/pip/commands/freeze.py deleted file mode 100644 index c198796..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/commands/freeze.py +++ /dev/null @@ -1,87 +0,0 @@ -from __future__ import absolute_import - -import sys - -import pip -from pip.compat import stdlib_pkgs -from pip.basecommand import Command -from pip.operations.freeze import freeze -from pip.wheel import WheelCache - - -DEV_PKGS = ('pip', 'setuptools', 'distribute', 'wheel') - - -class FreezeCommand(Command): - """ - Output installed packages in requirements format. - - packages are listed in a case-insensitive sorted order. - """ - name = 'freeze' - usage = """ - %prog [options]""" - summary = 'Output installed packages in requirements format.' - log_streams = ("ext://sys.stderr", "ext://sys.stderr") - - def __init__(self, *args, **kw): - super(FreezeCommand, self).__init__(*args, **kw) - - self.cmd_opts.add_option( - '-r', '--requirement', - dest='requirements', - action='append', - default=[], - metavar='file', - help="Use the order in the given requirements file and its " - "comments when generating output. 
This option can be " - "used multiple times.") - self.cmd_opts.add_option( - '-f', '--find-links', - dest='find_links', - action='append', - default=[], - metavar='URL', - help='URL for finding packages, which will be added to the ' - 'output.') - self.cmd_opts.add_option( - '-l', '--local', - dest='local', - action='store_true', - default=False, - help='If in a virtualenv that has global access, do not output ' - 'globally-installed packages.') - self.cmd_opts.add_option( - '--user', - dest='user', - action='store_true', - default=False, - help='Only output packages installed in user-site.') - self.cmd_opts.add_option( - '--all', - dest='freeze_all', - action='store_true', - help='Do not skip these packages in the output:' - ' %s' % ', '.join(DEV_PKGS)) - - self.parser.insert_option_group(0, self.cmd_opts) - - def run(self, options, args): - format_control = pip.index.FormatControl(set(), set()) - wheel_cache = WheelCache(options.cache_dir, format_control) - skip = set(stdlib_pkgs) - if not options.freeze_all: - skip.update(DEV_PKGS) - - freeze_kwargs = dict( - requirement=options.requirements, - find_links=options.find_links, - local_only=options.local, - user_only=options.user, - skip_regex=options.skip_requirements_regex, - isolated=options.isolated_mode, - wheel_cache=wheel_cache, - skip=skip) - - for line in freeze(**freeze_kwargs): - sys.stdout.write(line + '\n') diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/hash.py b/classifier/myenv/lib/python3.6/site-packages/pip/commands/hash.py deleted file mode 100644 index 27cca0b..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/commands/hash.py +++ /dev/null @@ -1,57 +0,0 @@ -from __future__ import absolute_import - -import hashlib -import logging -import sys - -from pip.basecommand import Command -from pip.status_codes import ERROR -from pip.utils import read_chunks -from pip.utils.hashes import FAVORITE_HASH, STRONG_HASHES - - -logger = logging.getLogger(__name__) - - -class HashCommand(Command): - """ - Compute a hash of a local package archive. - - These can be used with --hash in a requirements file to do repeatable - installs. - - """ - name = 'hash' - usage = '%prog [options] ...' - summary = 'Compute hashes of package archives.' 
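A minimal sketch, not part of the deleted file: this command hashes each archive in fixed-size chunks (see _hash_of_file further down), so memory stays flat no matter how large the file is. An equivalent helper, assuming sha256, pip's favored strong hash; the function name and chunk size here are illustrative:

    import hashlib

    def hash_of_file(path, algorithm='sha256', chunk_size=8192):
        h = hashlib.new(algorithm)
        with open(path, 'rb') as f:
            # iter() with a sentinel yields chunks until read() returns b''.
            for chunk in iter(lambda: f.read(chunk_size), b''):
                h.update(chunk)
        return h.hexdigest()

    # e.g. print('--hash=sha256:' + hash_of_file('pkg-1.0.tar.gz'))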
- - def __init__(self, *args, **kw): - super(HashCommand, self).__init__(*args, **kw) - self.cmd_opts.add_option( - '-a', '--algorithm', - dest='algorithm', - choices=STRONG_HASHES, - action='store', - default=FAVORITE_HASH, - help='The hash algorithm to use: one of %s' % - ', '.join(STRONG_HASHES)) - self.parser.insert_option_group(0, self.cmd_opts) - - def run(self, options, args): - if not args: - self.parser.print_usage(sys.stderr) - return ERROR - - algorithm = options.algorithm - for path in args: - logger.info('%s:\n--hash=%s:%s', - path, algorithm, _hash_of_file(path, algorithm)) - - -def _hash_of_file(path, algorithm): - """Return the hash digest of a file.""" - with open(path, 'rb') as archive: - hash = hashlib.new(algorithm) - for chunk in read_chunks(archive): - hash.update(chunk) - return hash.hexdigest() diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/help.py b/classifier/myenv/lib/python3.6/site-packages/pip/commands/help.py deleted file mode 100644 index 11722f1..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/commands/help.py +++ /dev/null @@ -1,35 +0,0 @@ -from __future__ import absolute_import - -from pip.basecommand import Command, SUCCESS -from pip.exceptions import CommandError - - -class HelpCommand(Command): - """Show help for commands""" - name = 'help' - usage = """ - %prog """ - summary = 'Show help for commands.' - - def run(self, options, args): - from pip.commands import commands_dict, get_similar_commands - - try: - # 'pip help' with no args is handled by pip.__init__.parseopt() - cmd_name = args[0] # the command we need help for - except IndexError: - return SUCCESS - - if cmd_name not in commands_dict: - guess = get_similar_commands(cmd_name) - - msg = ['unknown command "%s"' % cmd_name] - if guess: - msg.append('maybe you meant "%s"' % guess) - - raise CommandError(' - '.join(msg)) - - command = commands_dict[cmd_name]() - command.parser.print_help() - - return SUCCESS diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/install.py b/classifier/myenv/lib/python3.6/site-packages/pip/commands/install.py deleted file mode 100644 index 39292b1..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/commands/install.py +++ /dev/null @@ -1,455 +0,0 @@ -from __future__ import absolute_import - -import logging -import operator -import os -import tempfile -import shutil -import warnings -try: - import wheel -except ImportError: - wheel = None - -from pip.req import RequirementSet -from pip.basecommand import RequirementCommand -from pip.locations import virtualenv_no_global, distutils_scheme -from pip.exceptions import ( - InstallationError, CommandError, PreviousBuildDirError, -) -from pip import cmdoptions -from pip.utils import ensure_dir, get_installed_version -from pip.utils.build import BuildDirectory -from pip.utils.deprecation import RemovedInPip10Warning -from pip.utils.filesystem import check_path_owner -from pip.wheel import WheelCache, WheelBuilder - -from pip.locations import running_under_virtualenv - -logger = logging.getLogger(__name__) - - -class InstallCommand(RequirementCommand): - """ - Install packages from: - - - PyPI (and other indexes) using requirement specifiers. - - VCS project urls. - - Local project directories. - - Local or remote source archives. - - pip also supports installing from "requirements files", which provide - an easy way to specify a whole environment to be installed. - """ - name = 'install' - - usage = """ - %prog [options] [package-index-options] ... 
- %prog [options] -r [package-index-options] ... - %prog [options] [-e] ... - %prog [options] [-e] ... - %prog [options] ...""" - - summary = 'Install packages.' - - def __init__(self, *args, **kw): - super(InstallCommand, self).__init__(*args, **kw) - - default_user = True - if running_under_virtualenv(): - default_user = False - if os.geteuid() == 0: - default_user = False - - cmd_opts = self.cmd_opts - - cmd_opts.add_option(cmdoptions.constraints()) - cmd_opts.add_option(cmdoptions.editable()) - cmd_opts.add_option(cmdoptions.requirements()) - cmd_opts.add_option(cmdoptions.build_dir()) - - cmd_opts.add_option( - '-t', '--target', - dest='target_dir', - metavar='dir', - default=None, - help='Install packages into . ' - 'By default this will not replace existing files/folders in ' - '. Use --upgrade to replace existing packages in ' - 'with new versions.' - ) - - cmd_opts.add_option( - '-d', '--download', '--download-dir', '--download-directory', - dest='download_dir', - metavar='dir', - default=None, - help=("Download packages into instead of installing them, " - "regardless of what's already installed."), - ) - - cmd_opts.add_option(cmdoptions.src()) - - cmd_opts.add_option( - '-U', '--upgrade', - dest='upgrade', - action='store_true', - help='Upgrade all specified packages to the newest available ' - 'version. The handling of dependencies depends on the ' - 'upgrade-strategy used.' - ) - - cmd_opts.add_option( - '--upgrade-strategy', - dest='upgrade_strategy', - default='eager', - choices=['only-if-needed', 'eager'], - help='Determines how dependency upgrading should be handled. ' - '"eager" - dependencies are upgraded regardless of ' - 'whether the currently installed version satisfies the ' - 'requirements of the upgraded package(s). ' - '"only-if-needed" - are upgraded only when they do not ' - 'satisfy the requirements of the upgraded package(s).' - ) - - cmd_opts.add_option( - '--force-reinstall', - dest='force_reinstall', - action='store_true', - help='When upgrading, reinstall all packages even if they are ' - 'already up-to-date.') - - cmd_opts.add_option( - '-I', '--ignore-installed', - dest='ignore_installed', - action='store_true', - default=default_user, - help='Ignore the installed packages (reinstalling instead).') - - cmd_opts.add_option(cmdoptions.ignore_requires_python()) - cmd_opts.add_option(cmdoptions.no_deps()) - - cmd_opts.add_option(cmdoptions.install_options()) - cmd_opts.add_option(cmdoptions.global_options()) - - cmd_opts.add_option( - '--user', - dest='use_user_site', - action='store_true', - default=default_user, - help="Install to the Python user install directory for your " - "platform. Typically ~/.local/, or %APPDATA%\Python on " - "Windows. (See the Python documentation for site.USER_BASE " - "for full details.) On Debian systems, this is the " - "default when running outside of a virtual environment " - "and not as root.") - - cmd_opts.add_option( - '--system', - dest='use_user_site', - action='store_false', - help="Install using the system scheme (overrides --user on " - "Debian systems)") - - cmd_opts.add_option( - '--egg', - dest='as_egg', - action='store_true', - help="Install packages as eggs, not 'flat', like pip normally " - "does. This option is not about installing *from* eggs. 
" - "(WARNING: Because this option overrides pip's normal install" - " logic, requirements files may not behave as expected.)") - - cmd_opts.add_option( - '--root', - dest='root_path', - metavar='dir', - default=None, - help="Install everything relative to this alternate root " - "directory.") - - cmd_opts.add_option( - '--prefix', - dest='prefix_path', - metavar='dir', - default=None, - help="Installation prefix where lib, bin and other top-level " - "folders are placed") - - cmd_opts.add_option( - "--compile", - action="store_true", - dest="compile", - default=True, - help="Compile py files to pyc", - ) - - cmd_opts.add_option( - "--no-compile", - action="store_false", - dest="compile", - help="Do not compile py files to pyc", - ) - - cmd_opts.add_option(cmdoptions.use_wheel()) - cmd_opts.add_option(cmdoptions.no_use_wheel()) - cmd_opts.add_option(cmdoptions.no_binary()) - cmd_opts.add_option(cmdoptions.only_binary()) - cmd_opts.add_option(cmdoptions.pre()) - cmd_opts.add_option(cmdoptions.no_clean()) - cmd_opts.add_option(cmdoptions.require_hashes()) - - index_opts = cmdoptions.make_option_group( - cmdoptions.index_group, - self.parser, - ) - - self.parser.insert_option_group(0, index_opts) - self.parser.insert_option_group(0, cmd_opts) - - def run(self, options, args): - cmdoptions.resolve_wheel_no_use_binary(options) - cmdoptions.check_install_build_global(options) - - if options.as_egg: - warnings.warn( - "--egg has been deprecated and will be removed in the future. " - "This flag is mutually exclusive with large parts of pip, and " - "actually using it invalidates pip's ability to manage the " - "installation process.", - RemovedInPip10Warning, - ) - - if options.allow_external: - warnings.warn( - "--allow-external has been deprecated and will be removed in " - "the future. Due to changes in the repository protocol, it no " - "longer has any effect.", - RemovedInPip10Warning, - ) - - if options.allow_all_external: - warnings.warn( - "--allow-all-external has been deprecated and will be removed " - "in the future. Due to changes in the repository protocol, it " - "no longer has any effect.", - RemovedInPip10Warning, - ) - - if options.allow_unverified: - warnings.warn( - "--allow-unverified has been deprecated and will be removed " - "in the future. Due to changes in the repository protocol, it " - "no longer has any effect.", - RemovedInPip10Warning, - ) - - if options.download_dir: - warnings.warn( - "pip install --download has been deprecated and will be " - "removed in the future. Pip now has a download command that " - "should be used instead.", - RemovedInPip10Warning, - ) - options.ignore_installed = True - - if options.build_dir: - options.build_dir = os.path.abspath(options.build_dir) - - options.src_dir = os.path.abspath(options.src_dir) - install_options = options.install_options or [] - if options.use_user_site: - if options.prefix_path: - raise CommandError( - "Can not combine '--user' and '--prefix' as they imply " - "different installation locations" - ) - if virtualenv_no_global(): - raise InstallationError( - "Can not perform a '--user' install. User site-packages " - "are not visible in this virtualenv." 
- ) - install_options.append('--user') - install_options.append('--prefix=') - - temp_target_dir = None - if options.target_dir: - options.ignore_installed = True - temp_target_dir = tempfile.mkdtemp() - options.target_dir = os.path.abspath(options.target_dir) - if (os.path.exists(options.target_dir) and not - os.path.isdir(options.target_dir)): - raise CommandError( - "Target path exists but is not a directory, will not " - "continue." - ) - install_options.append('--home=' + temp_target_dir) - - global_options = options.global_options or [] - - with self._build_session(options) as session: - - finder = self._build_package_finder(options, session) - build_delete = (not (options.no_clean or options.build_dir)) - wheel_cache = WheelCache(options.cache_dir, options.format_control) - if options.cache_dir and not check_path_owner(options.cache_dir): - logger.warning( - "The directory '%s' or its parent directory is not owned " - "by the current user and caching wheels has been " - "disabled. check the permissions and owner of that " - "directory. If executing pip with sudo, you may want " - "sudo's -H flag.", - options.cache_dir, - ) - options.cache_dir = None - - with BuildDirectory(options.build_dir, - delete=build_delete) as build_dir: - requirement_set = RequirementSet( - build_dir=build_dir, - src_dir=options.src_dir, - download_dir=options.download_dir, - upgrade=options.upgrade, - upgrade_strategy=options.upgrade_strategy, - as_egg=options.as_egg, - ignore_installed=options.ignore_installed, - ignore_dependencies=options.ignore_dependencies, - ignore_requires_python=options.ignore_requires_python, - force_reinstall=options.force_reinstall, - use_user_site=options.use_user_site, - target_dir=temp_target_dir, - session=session, - pycompile=options.compile, - isolated=options.isolated_mode, - wheel_cache=wheel_cache, - require_hashes=options.require_hashes, - ) - - self.populate_requirement_set( - requirement_set, args, options, finder, session, self.name, - wheel_cache - ) - - if not requirement_set.has_requirements: - return - - try: - if (options.download_dir or not wheel or not - options.cache_dir): - # on -d don't do complex things like building - # wheels, and don't try to build wheels when wheel is - # not installed. - requirement_set.prepare_files(finder) - else: - # build wheels before install. - wb = WheelBuilder( - requirement_set, - finder, - build_options=[], - global_options=[], - ) - # Ignore the result: a failed wheel will be - # installed from the sdist/vcs whatever. 
- wb.build(autobuilding=True) - - if not options.download_dir: - requirement_set.install( - install_options, - global_options, - root=options.root_path, - prefix=options.prefix_path, - ) - - possible_lib_locations = get_lib_location_guesses( - user=options.use_user_site, - home=temp_target_dir, - root=options.root_path, - prefix=options.prefix_path, - isolated=options.isolated_mode, - ) - reqs = sorted( - requirement_set.successfully_installed, - key=operator.attrgetter('name')) - items = [] - for req in reqs: - item = req.name - try: - installed_version = get_installed_version( - req.name, possible_lib_locations - ) - if installed_version: - item += '-' + installed_version - except Exception: - pass - items.append(item) - installed = ' '.join(items) - if installed: - logger.info('Successfully installed %s', installed) - else: - downloaded = ' '.join([ - req.name - for req in requirement_set.successfully_downloaded - ]) - if downloaded: - logger.info( - 'Successfully downloaded %s', downloaded - ) - except PreviousBuildDirError: - options.no_clean = True - raise - finally: - # Clean up - if not options.no_clean: - requirement_set.cleanup_files() - - if options.target_dir: - ensure_dir(options.target_dir) - - # Checking both purelib and platlib directories for installed - # packages to be moved to target directory - lib_dir_list = [] - - purelib_dir = distutils_scheme('', home=temp_target_dir)['purelib'] - platlib_dir = distutils_scheme('', home=temp_target_dir)['platlib'] - - if os.path.exists(purelib_dir): - lib_dir_list.append(purelib_dir) - if os.path.exists(platlib_dir) and platlib_dir != purelib_dir: - lib_dir_list.append(platlib_dir) - - for lib_dir in lib_dir_list: - for item in os.listdir(lib_dir): - target_item_dir = os.path.join(options.target_dir, item) - if os.path.exists(target_item_dir): - if not options.upgrade: - logger.warning( - 'Target directory %s already exists. Specify ' - '--upgrade to force replacement.', - target_item_dir - ) - continue - if os.path.islink(target_item_dir): - logger.warning( - 'Target directory %s already exists and is ' - 'a link. 
Pip will not automatically replace ' - 'links, please remove if replacement is ' - 'desired.', - target_item_dir - ) - continue - if os.path.isdir(target_item_dir): - shutil.rmtree(target_item_dir) - else: - os.remove(target_item_dir) - - shutil.move( - os.path.join(lib_dir, item), - target_item_dir - ) - shutil.rmtree(temp_target_dir) - return requirement_set - - -def get_lib_location_guesses(*args, **kwargs): - scheme = distutils_scheme('', *args, **kwargs) - return [scheme['purelib'], scheme['platlib']] diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/list.py b/classifier/myenv/lib/python3.6/site-packages/pip/commands/list.py deleted file mode 100644 index 6f6995d..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/commands/list.py +++ /dev/null @@ -1,337 +0,0 @@ -from __future__ import absolute_import - -import json -import logging -import warnings -try: - from itertools import zip_longest -except ImportError: - from itertools import izip_longest as zip_longest - -from pip._vendor import six - -from pip.basecommand import Command -from pip.exceptions import CommandError -from pip.index import PackageFinder -from pip.utils import ( - get_installed_distributions, dist_is_editable) -from pip.utils.deprecation import RemovedInPip10Warning -from pip.cmdoptions import make_option_group, index_group - -logger = logging.getLogger(__name__) - - -class ListCommand(Command): - """ - List installed packages, including editables. - - Packages are listed in a case-insensitive sorted order. - """ - name = 'list' - usage = """ - %prog [options]""" - summary = 'List installed packages.' - - def __init__(self, *args, **kw): - super(ListCommand, self).__init__(*args, **kw) - - cmd_opts = self.cmd_opts - - cmd_opts.add_option( - '-o', '--outdated', - action='store_true', - default=False, - help='List outdated packages') - cmd_opts.add_option( - '-u', '--uptodate', - action='store_true', - default=False, - help='List uptodate packages') - cmd_opts.add_option( - '-e', '--editable', - action='store_true', - default=False, - help='List editable projects.') - cmd_opts.add_option( - '-l', '--local', - action='store_true', - default=False, - help=('If in a virtualenv that has global access, do not list ' - 'globally-installed packages.'), - ) - self.cmd_opts.add_option( - '--user', - dest='user', - action='store_true', - default=False, - help='Only output packages installed in user-site.') - - cmd_opts.add_option( - '--pre', - action='store_true', - default=False, - help=("Include pre-release and development versions. By default, " - "pip only finds stable versions."), - ) - - cmd_opts.add_option( - '--format', - action='store', - dest='list_format', - choices=('legacy', 'columns', 'freeze', 'json'), - help="Select the output format among: legacy (default), columns, " - "freeze or json.", - ) - - cmd_opts.add_option( - '--not-required', - action='store_true', - dest='not_required', - help="List packages that are not dependencies of " - "installed packages.", - ) - - index_opts = make_option_group(index_group, self.parser) - - self.parser.insert_option_group(0, index_opts) - self.parser.insert_option_group(0, cmd_opts) - - def _build_package_finder(self, options, index_urls, session): - """ - Create a package finder appropriate to this list command. 
- """ - return PackageFinder( - find_links=options.find_links, - index_urls=index_urls, - allow_all_prereleases=options.pre, - trusted_hosts=options.trusted_hosts, - process_dependency_links=options.process_dependency_links, - session=session, - ) - - def run(self, options, args): - if options.allow_external: - warnings.warn( - "--allow-external has been deprecated and will be removed in " - "the future. Due to changes in the repository protocol, it no " - "longer has any effect.", - RemovedInPip10Warning, - ) - - if options.allow_all_external: - warnings.warn( - "--allow-all-external has been deprecated and will be removed " - "in the future. Due to changes in the repository protocol, it " - "no longer has any effect.", - RemovedInPip10Warning, - ) - - if options.allow_unverified: - warnings.warn( - "--allow-unverified has been deprecated and will be removed " - "in the future. Due to changes in the repository protocol, it " - "no longer has any effect.", - RemovedInPip10Warning, - ) - - if options.list_format is None: - warnings.warn( - "The default format will switch to columns in the future. " - "You can use --format=(legacy|columns) (or define a " - "format=(legacy|columns) in your pip.conf under the [list] " - "section) to disable this warning.", - RemovedInPip10Warning, - ) - - if options.outdated and options.uptodate: - raise CommandError( - "Options --outdated and --uptodate cannot be combined.") - - packages = get_installed_distributions( - local_only=options.local, - user_only=options.user, - editables_only=options.editable, - ) - - if options.outdated: - packages = self.get_outdated(packages, options) - elif options.uptodate: - packages = self.get_uptodate(packages, options) - - if options.not_required: - packages = self.get_not_required(packages, options) - - self.output_package_listing(packages, options) - - def get_outdated(self, packages, options): - return [ - dist for dist in self.iter_packages_latest_infos(packages, options) - if dist.latest_version > dist.parsed_version - ] - - def get_uptodate(self, packages, options): - return [ - dist for dist in self.iter_packages_latest_infos(packages, options) - if dist.latest_version == dist.parsed_version - ] - - def get_not_required(self, packages, options): - dep_keys = set() - for dist in packages: - dep_keys.update(requirement.key for requirement in dist.requires()) - return set(pkg for pkg in packages if pkg.key not in dep_keys) - - def iter_packages_latest_infos(self, packages, options): - index_urls = [options.index_url] + options.extra_index_urls - if options.no_index: - logger.debug('Ignoring indexes: %s', ','.join(index_urls)) - index_urls = [] - - dependency_links = [] - for dist in packages: - if dist.has_metadata('dependency_links.txt'): - dependency_links.extend( - dist.get_metadata_lines('dependency_links.txt'), - ) - - with self._build_session(options) as session: - finder = self._build_package_finder(options, index_urls, session) - finder.add_dependency_links(dependency_links) - - for dist in packages: - typ = 'unknown' - all_candidates = finder.find_all_candidates(dist.key) - if not options.pre: - # Remove prereleases - all_candidates = [candidate for candidate in all_candidates - if not candidate.version.is_prerelease] - - if not all_candidates: - continue - best_candidate = max(all_candidates, - key=finder._candidate_sort_key) - remote_version = best_candidate.version - if best_candidate.location.is_wheel: - typ = 'wheel' - else: - typ = 'sdist' - # This is dirty but makes the rest of the code much cleaner - 
dist.latest_version = remote_version - dist.latest_filetype = typ - yield dist - - def output_legacy(self, dist): - if dist_is_editable(dist): - return '%s (%s, %s)' % ( - dist.project_name, - dist.version, - dist.location, - ) - else: - return '%s (%s)' % (dist.project_name, dist.version) - - def output_legacy_latest(self, dist): - return '%s - Latest: %s [%s]' % ( - self.output_legacy(dist), - dist.latest_version, - dist.latest_filetype, - ) - - def output_package_listing(self, packages, options): - packages = sorted( - packages, - key=lambda dist: dist.project_name.lower(), - ) - if options.list_format == 'columns' and packages: - data, header = format_for_columns(packages, options) - self.output_package_listing_columns(data, header) - elif options.list_format == 'freeze': - for dist in packages: - logger.info("%s==%s", dist.project_name, dist.version) - elif options.list_format == 'json': - logger.info(format_for_json(packages, options)) - else: # legacy - for dist in packages: - if options.outdated: - logger.info(self.output_legacy_latest(dist)) - else: - logger.info(self.output_legacy(dist)) - - def output_package_listing_columns(self, data, header): - # insert the header first: we need to know the size of column names - if len(data) > 0: - data.insert(0, header) - - pkg_strings, sizes = tabulate(data) - - # Create and add a separator. - if len(data) > 0: - pkg_strings.insert(1, " ".join(map(lambda x: '-' * x, sizes))) - - for val in pkg_strings: - logger.info(val) - - -def tabulate(vals): - # From pfmoore on GitHub: - # https://github.com/pypa/pip/issues/3651#issuecomment-216932564 - assert len(vals) > 0 - - sizes = [0] * max(len(x) for x in vals) - for row in vals: - sizes = [max(s, len(str(c))) for s, c in zip_longest(sizes, row)] - - result = [] - for row in vals: - display = " ".join([str(c).ljust(s) if c is not None else '' - for s, c in zip_longest(sizes, row)]) - result.append(display) - - return result, sizes - - -def format_for_columns(pkgs, options): - """ - Convert the package data into something usable - by output_package_listing_columns. - """ - running_outdated = options.outdated - # Adjust the header for the `pip list --outdated` case. 
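# "Latest" is the newest version found on the index; "Type" records whether
# that best candidate ships as a wheel or an sdist (both attached to each
# dist by iter_packages_latest_infos above).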
- if running_outdated: - header = ["Package", "Version", "Latest", "Type"] - else: - header = ["Package", "Version"] - - data = [] - if any(dist_is_editable(x) for x in pkgs): - header.append("Location") - - for proj in pkgs: - # if we're working on the 'outdated' list, separate out the - # latest_version and type - row = [proj.project_name, proj.version] - - if running_outdated: - row.append(proj.latest_version) - row.append(proj.latest_filetype) - - if dist_is_editable(proj): - row.append(proj.location) - - data.append(row) - - return data, header - - -def format_for_json(packages, options): - data = [] - for dist in packages: - info = { - 'name': dist.project_name, - 'version': six.text_type(dist.version), - } - if options.outdated: - info['latest_version'] = six.text_type(dist.latest_version) - info['latest_filetype'] = dist.latest_filetype - data.append(info) - return json.dumps(data) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/search.py b/classifier/myenv/lib/python3.6/site-packages/pip/commands/search.py deleted file mode 100644 index bd2ea8a..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/commands/search.py +++ /dev/null @@ -1,133 +0,0 @@ -from __future__ import absolute_import - -import logging -import sys -import textwrap - -from pip.basecommand import Command, SUCCESS -from pip.compat import OrderedDict -from pip.download import PipXmlrpcTransport -from pip.models import PyPI -from pip.utils import get_terminal_size -from pip.utils.logging import indent_log -from pip.exceptions import CommandError -from pip.status_codes import NO_MATCHES_FOUND -from pip._vendor.packaging.version import parse as parse_version -from pip._vendor import pkg_resources -from pip._vendor.six.moves import xmlrpc_client - - -logger = logging.getLogger(__name__) - - -class SearchCommand(Command): - """Search for PyPI packages whose name or summary contains .""" - name = 'search' - usage = """ - %prog [options] """ - summary = 'Search PyPI for packages.' - - def __init__(self, *args, **kw): - super(SearchCommand, self).__init__(*args, **kw) - self.cmd_opts.add_option( - '-i', '--index', - dest='index', - metavar='URL', - default=PyPI.pypi_url, - help='Base URL of Python Package Index (default %default)') - - self.parser.insert_option_group(0, self.cmd_opts) - - def run(self, options, args): - if not args: - raise CommandError('Missing required argument (search query).') - query = args - pypi_hits = self.search(query, options) - hits = transform_hits(pypi_hits) - - terminal_width = None - if sys.stdout.isatty(): - terminal_width = get_terminal_size()[0] - - print_results(hits, terminal_width=terminal_width) - if pypi_hits: - return SUCCESS - return NO_MATCHES_FOUND - - def search(self, query, options): - index_url = options.index - with self._build_session(options) as session: - transport = PipXmlrpcTransport(index_url, session) - pypi = xmlrpc_client.ServerProxy(index_url, transport) - hits = pypi.search({'name': query, 'summary': query}, 'or') - return hits - - -def transform_hits(hits): - """ - The list from pypi is really a list of versions. We want a list of - packages with the list of versions stored inline. This converts the - list from pypi into one we can use. 
- """ - packages = OrderedDict() - for hit in hits: - name = hit['name'] - summary = hit['summary'] - version = hit['version'] - - if name not in packages.keys(): - packages[name] = { - 'name': name, - 'summary': summary, - 'versions': [version], - } - else: - packages[name]['versions'].append(version) - - # if this is the highest version, replace summary and score - if version == highest_version(packages[name]['versions']): - packages[name]['summary'] = summary - - return list(packages.values()) - - -def print_results(hits, name_column_width=None, terminal_width=None): - if not hits: - return - if name_column_width is None: - name_column_width = max([ - len(hit['name']) + len(hit.get('versions', ['-'])[-1]) - for hit in hits - ]) + 4 - - installed_packages = [p.project_name for p in pkg_resources.working_set] - for hit in hits: - name = hit['name'] - summary = hit['summary'] or '' - version = hit.get('versions', ['-'])[-1] - if terminal_width is not None: - target_width = terminal_width - name_column_width - 5 - if target_width > 10: - # wrap and indent summary to fit terminal - summary = textwrap.wrap(summary, target_width) - summary = ('\n' + ' ' * (name_column_width + 3)).join(summary) - - line = '%-*s - %s' % (name_column_width, - '%s (%s)' % (name, version), summary) - try: - logger.info(line) - if name in installed_packages: - dist = pkg_resources.get_distribution(name) - with indent_log(): - latest = highest_version(hit['versions']) - if dist.version == latest: - logger.info('INSTALLED: %s (latest)', dist.version) - else: - logger.info('INSTALLED: %s', dist.version) - logger.info('LATEST: %s', latest) - except UnicodeEncodeError: - pass - - -def highest_version(versions): - return max(versions, key=parse_version) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/show.py b/classifier/myenv/lib/python3.6/site-packages/pip/commands/show.py deleted file mode 100644 index 111c16d..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/commands/show.py +++ /dev/null @@ -1,154 +0,0 @@ -from __future__ import absolute_import - -from email.parser import FeedParser -import logging -import os - -from pip.basecommand import Command -from pip.status_codes import SUCCESS, ERROR -from pip._vendor import pkg_resources -from pip._vendor.packaging.utils import canonicalize_name - - -logger = logging.getLogger(__name__) - - -class ShowCommand(Command): - """Show information about one or more installed packages.""" - name = 'show' - usage = """ - %prog [options] ...""" - summary = 'Show information about installed packages.' - - def __init__(self, *args, **kw): - super(ShowCommand, self).__init__(*args, **kw) - self.cmd_opts.add_option( - '-f', '--files', - dest='files', - action='store_true', - default=False, - help='Show the full list of installed files for each package.') - - self.parser.insert_option_group(0, self.cmd_opts) - - def run(self, options, args): - if not args: - logger.warning('ERROR: Please provide a package name or names.') - return ERROR - query = args - - results = search_packages_info(query) - if not print_results( - results, list_files=options.files, verbose=options.verbose): - return ERROR - return SUCCESS - - -def search_packages_info(query): - """ - Gather details from installed distributions. Print distribution name, - version, location, and installed files. Installed files requires a - pip generated 'installed-files.txt' in the distributions '.egg-info' - directory. 
- """ - installed = {} - for p in pkg_resources.working_set: - installed[canonicalize_name(p.project_name)] = p - - query_names = [canonicalize_name(name) for name in query] - - for dist in [installed[pkg] for pkg in query_names if pkg in installed]: - package = { - 'name': dist.project_name, - 'version': dist.version, - 'location': dist.location, - 'requires': [dep.project_name for dep in dist.requires()], - } - file_list = None - metadata = None - if isinstance(dist, pkg_resources.DistInfoDistribution): - # RECORDs should be part of .dist-info metadatas - if dist.has_metadata('RECORD'): - lines = dist.get_metadata_lines('RECORD') - paths = [l.split(',')[0] for l in lines] - paths = [os.path.join(dist.location, p) for p in paths] - file_list = [os.path.relpath(p, dist.location) for p in paths] - - if dist.has_metadata('METADATA'): - metadata = dist.get_metadata('METADATA') - else: - # Otherwise use pip's log for .egg-info's - if dist.has_metadata('installed-files.txt'): - paths = dist.get_metadata_lines('installed-files.txt') - paths = [os.path.join(dist.egg_info, p) for p in paths] - file_list = [os.path.relpath(p, dist.location) for p in paths] - - if dist.has_metadata('PKG-INFO'): - metadata = dist.get_metadata('PKG-INFO') - - if dist.has_metadata('entry_points.txt'): - entry_points = dist.get_metadata_lines('entry_points.txt') - package['entry_points'] = entry_points - - if dist.has_metadata('INSTALLER'): - for line in dist.get_metadata_lines('INSTALLER'): - if line.strip(): - package['installer'] = line.strip() - break - - # @todo: Should pkg_resources.Distribution have a - # `get_pkg_info` method? - feed_parser = FeedParser() - feed_parser.feed(metadata) - pkg_info_dict = feed_parser.close() - for key in ('metadata-version', 'summary', - 'home-page', 'author', 'author-email', 'license'): - package[key] = pkg_info_dict.get(key) - - # It looks like FeedParser cannot deal with repeated headers - classifiers = [] - for line in metadata.splitlines(): - if line.startswith('Classifier: '): - classifiers.append(line[len('Classifier: '):]) - package['classifiers'] = classifiers - - if file_list: - package['files'] = sorted(file_list) - yield package - - -def print_results(distributions, list_files=False, verbose=False): - """ - Print the informations from installed distributions found. 
- """ - results_printed = False - for i, dist in enumerate(distributions): - results_printed = True - if i > 0: - logger.info("---") - logger.info("Name: %s", dist.get('name', '')) - logger.info("Version: %s", dist.get('version', '')) - logger.info("Summary: %s", dist.get('summary', '')) - logger.info("Home-page: %s", dist.get('home-page', '')) - logger.info("Author: %s", dist.get('author', '')) - logger.info("Author-email: %s", dist.get('author-email', '')) - logger.info("License: %s", dist.get('license', '')) - logger.info("Location: %s", dist.get('location', '')) - logger.info("Requires: %s", ', '.join(dist.get('requires', []))) - if verbose: - logger.info("Metadata-Version: %s", - dist.get('metadata-version', '')) - logger.info("Installer: %s", dist.get('installer', '')) - logger.info("Classifiers:") - for classifier in dist.get('classifiers', []): - logger.info(" %s", classifier) - logger.info("Entry-points:") - for entry in dist.get('entry_points', []): - logger.info(" %s", entry.strip()) - if list_files: - logger.info("Files:") - for line in dist.get('files', []): - logger.info(" %s", line.strip()) - if "files" not in dist: - logger.info("Cannot locate installed-files.txt") - return results_printed diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/uninstall.py b/classifier/myenv/lib/python3.6/site-packages/pip/commands/uninstall.py deleted file mode 100644 index 8ba1a7c..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/commands/uninstall.py +++ /dev/null @@ -1,76 +0,0 @@ -from __future__ import absolute_import - -import pip -from pip.wheel import WheelCache -from pip.req import InstallRequirement, RequirementSet, parse_requirements -from pip.basecommand import Command -from pip.exceptions import InstallationError - - -class UninstallCommand(Command): - """ - Uninstall packages. - - pip is able to uninstall most installed packages. Known exceptions are: - - - Pure distutils packages installed with ``python setup.py install``, which - leave behind no metadata to determine what files were installed. - - Script wrappers installed by ``python setup.py develop``. - """ - name = 'uninstall' - usage = """ - %prog [options] ... - %prog [options] -r ...""" - summary = 'Uninstall packages.' - - def __init__(self, *args, **kw): - super(UninstallCommand, self).__init__(*args, **kw) - self.cmd_opts.add_option( - '-r', '--requirement', - dest='requirements', - action='append', - default=[], - metavar='file', - help='Uninstall all the packages listed in the given requirements ' - 'file. 
This option can be used multiple times.', - ) - self.cmd_opts.add_option( - '-y', '--yes', - dest='yes', - action='store_true', - help="Don't ask for confirmation of uninstall deletions.") - - self.parser.insert_option_group(0, self.cmd_opts) - - def run(self, options, args): - with self._build_session(options) as session: - format_control = pip.index.FormatControl(set(), set()) - wheel_cache = WheelCache(options.cache_dir, format_control) - requirement_set = RequirementSet( - build_dir=None, - src_dir=None, - download_dir=None, - isolated=options.isolated_mode, - session=session, - wheel_cache=wheel_cache, - ) - for name in args: - requirement_set.add_requirement( - InstallRequirement.from_line( - name, isolated=options.isolated_mode, - wheel_cache=wheel_cache - ) - ) - for filename in options.requirements: - for req in parse_requirements( - filename, - options=options, - session=session, - wheel_cache=wheel_cache): - requirement_set.add_requirement(req) - if not requirement_set.has_requirements: - raise InstallationError( - 'You must give at least one requirement to %(name)s (see ' - '"pip help %(name)s")' % dict(name=self.name) - ) - requirement_set.uninstall(auto_confirm=options.yes) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/commands/wheel.py b/classifier/myenv/lib/python3.6/site-packages/pip/commands/wheel.py deleted file mode 100644 index 70e95eb..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/commands/wheel.py +++ /dev/null @@ -1,208 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import absolute_import - -import logging -import os -import warnings - -from pip.basecommand import RequirementCommand -from pip.exceptions import CommandError, PreviousBuildDirError -from pip.req import RequirementSet -from pip.utils import import_or_raise -from pip.utils.build import BuildDirectory -from pip.utils.deprecation import RemovedInPip10Warning -from pip.wheel import WheelCache, WheelBuilder -from pip import cmdoptions - - -logger = logging.getLogger(__name__) - - -class WheelCommand(RequirementCommand): - """ - Build Wheel archives for your requirements and dependencies. - - Wheel is a built-package format, and offers the advantage of not - recompiling your software during every install. For more details, see the - wheel docs: https://wheel.readthedocs.io/en/latest/ - - Requirements: setuptools>=0.8, and wheel. - - 'pip wheel' uses the bdist_wheel setuptools extension from the wheel - package to build individual wheels. - - """ - - name = 'wheel' - usage = """ - %prog [options] ... - %prog [options] -r ... - %prog [options] [-e] ... - %prog [options] [-e] ... - %prog [options] ...""" - - summary = 'Build wheels from your requirements.' 
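The class docstring above notes that 'pip wheel' drives setuptools' bdist_wheel extension. For orientation, a minimal sketch of the same build done by hand, assuming a hypothetical ./myproject directory containing a setup.py, and an environment with setuptools>=0.8 plus the wheel package installed:

    import os
    import subprocess
    import sys

    # Build a .whl into ./wheelhouse the way 'pip wheel' ultimately does,
    # by invoking the bdist_wheel command against the project's setup.py.
    subprocess.check_call(
        [sys.executable, 'setup.py', 'bdist_wheel',
         '--dist-dir', os.path.abspath('wheelhouse')],
        cwd='myproject',
    )

The deleted WheelBuilder wraps essentially this call, threading any --global-option arguments in before the 'bdist_wheel' token and any --build-option arguments after it.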
- - def __init__(self, *args, **kw): - super(WheelCommand, self).__init__(*args, **kw) - - cmd_opts = self.cmd_opts - - cmd_opts.add_option( - '-w', '--wheel-dir', - dest='wheel_dir', - metavar='dir', - default=os.curdir, - help=("Build wheels into , where the default is the " - "current working directory."), - ) - cmd_opts.add_option(cmdoptions.use_wheel()) - cmd_opts.add_option(cmdoptions.no_use_wheel()) - cmd_opts.add_option(cmdoptions.no_binary()) - cmd_opts.add_option(cmdoptions.only_binary()) - cmd_opts.add_option( - '--build-option', - dest='build_options', - metavar='options', - action='append', - help="Extra arguments to be supplied to 'setup.py bdist_wheel'.") - cmd_opts.add_option(cmdoptions.constraints()) - cmd_opts.add_option(cmdoptions.editable()) - cmd_opts.add_option(cmdoptions.requirements()) - cmd_opts.add_option(cmdoptions.src()) - cmd_opts.add_option(cmdoptions.ignore_requires_python()) - cmd_opts.add_option(cmdoptions.no_deps()) - cmd_opts.add_option(cmdoptions.build_dir()) - - cmd_opts.add_option( - '--global-option', - dest='global_options', - action='append', - metavar='options', - help="Extra global options to be supplied to the setup.py " - "call before the 'bdist_wheel' command.") - - cmd_opts.add_option( - '--pre', - action='store_true', - default=False, - help=("Include pre-release and development versions. By default, " - "pip only finds stable versions."), - ) - - cmd_opts.add_option(cmdoptions.no_clean()) - cmd_opts.add_option(cmdoptions.require_hashes()) - - index_opts = cmdoptions.make_option_group( - cmdoptions.index_group, - self.parser, - ) - - self.parser.insert_option_group(0, index_opts) - self.parser.insert_option_group(0, cmd_opts) - - def check_required_packages(self): - import_or_raise( - 'wheel.bdist_wheel', - CommandError, - "'pip wheel' requires the 'wheel' package. To fix this, run: " - "pip install wheel" - ) - pkg_resources = import_or_raise( - 'pkg_resources', - CommandError, - "'pip wheel' requires setuptools >= 0.8 for dist-info support." - " To fix this, run: pip install --upgrade setuptools" - ) - if not hasattr(pkg_resources, 'DistInfoDistribution'): - raise CommandError( - "'pip wheel' requires setuptools >= 0.8 for dist-info " - "support. To fix this, run: pip install --upgrade " - "setuptools" - ) - - def run(self, options, args): - self.check_required_packages() - cmdoptions.resolve_wheel_no_use_binary(options) - cmdoptions.check_install_build_global(options) - - if options.allow_external: - warnings.warn( - "--allow-external has been deprecated and will be removed in " - "the future. Due to changes in the repository protocol, it no " - "longer has any effect.", - RemovedInPip10Warning, - ) - - if options.allow_all_external: - warnings.warn( - "--allow-all-external has been deprecated and will be removed " - "in the future. Due to changes in the repository protocol, it " - "no longer has any effect.", - RemovedInPip10Warning, - ) - - if options.allow_unverified: - warnings.warn( - "--allow-unverified has been deprecated and will be removed " - "in the future. 
Due to changes in the repository protocol, it " - "no longer has any effect.", - RemovedInPip10Warning, - ) - - index_urls = [options.index_url] + options.extra_index_urls - if options.no_index: - logger.debug('Ignoring indexes: %s', ','.join(index_urls)) - index_urls = [] - - if options.build_dir: - options.build_dir = os.path.abspath(options.build_dir) - - options.src_dir = os.path.abspath(options.src_dir) - - with self._build_session(options) as session: - finder = self._build_package_finder(options, session) - build_delete = (not (options.no_clean or options.build_dir)) - wheel_cache = WheelCache(options.cache_dir, options.format_control) - with BuildDirectory(options.build_dir, - delete=build_delete) as build_dir: - requirement_set = RequirementSet( - build_dir=build_dir, - src_dir=options.src_dir, - download_dir=None, - ignore_dependencies=options.ignore_dependencies, - ignore_installed=True, - ignore_requires_python=options.ignore_requires_python, - isolated=options.isolated_mode, - session=session, - wheel_cache=wheel_cache, - wheel_download_dir=options.wheel_dir, - require_hashes=options.require_hashes - ) - - self.populate_requirement_set( - requirement_set, args, options, finder, session, self.name, - wheel_cache - ) - - if not requirement_set.has_requirements: - return - - try: - # build wheels - wb = WheelBuilder( - requirement_set, - finder, - build_options=options.build_options or [], - global_options=options.global_options or [], - ) - if not wb.build(): - raise CommandError( - "Failed to build one or more wheels" - ) - except PreviousBuildDirError: - options.no_clean = True - raise - finally: - if not options.no_clean: - requirement_set.cleanup_files() diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/compat/__init__.py b/classifier/myenv/lib/python3.6/site-packages/pip/compat/__init__.py deleted file mode 100644 index 099672c..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/compat/__init__.py +++ /dev/null @@ -1,164 +0,0 @@ -"""Stuff that differs in different Python versions and platform -distributions.""" -from __future__ import absolute_import, division - -import os -import sys - -from pip._vendor.six import text_type - -try: - from logging.config import dictConfig as logging_dictConfig -except ImportError: - from pip.compat.dictconfig import dictConfig as logging_dictConfig - -try: - from collections import OrderedDict -except ImportError: - from pip._vendor.ordereddict import OrderedDict - -try: - import ipaddress -except ImportError: - try: - from pip._vendor import ipaddress - except ImportError: - import ipaddr as ipaddress - ipaddress.ip_address = ipaddress.IPAddress - ipaddress.ip_network = ipaddress.IPNetwork - - -try: - import sysconfig - - def get_stdlib(): - paths = [ - sysconfig.get_path("stdlib"), - sysconfig.get_path("platstdlib"), - ] - return set(filter(bool, paths)) -except ImportError: - from distutils import sysconfig - - def get_stdlib(): - paths = [ - sysconfig.get_python_lib(standard_lib=True), - sysconfig.get_python_lib(standard_lib=True, plat_specific=True), - ] - return set(filter(bool, paths)) - - -__all__ = [ - "logging_dictConfig", "ipaddress", "uses_pycache", "console_to_str", - "native_str", "get_path_uid", "stdlib_pkgs", "WINDOWS", "samefile", - "OrderedDict", -] - - -if sys.version_info >= (3, 4): - uses_pycache = True - from importlib.util import cache_from_source -else: - import imp - uses_pycache = hasattr(imp, 'cache_from_source') - if uses_pycache: - cache_from_source = imp.cache_from_source - else: - 
cache_from_source = None - - -if sys.version_info >= (3,): - def console_to_str(s): - try: - return s.decode(sys.__stdout__.encoding) - except UnicodeDecodeError: - return s.decode('utf_8') - - def native_str(s, replace=False): - if isinstance(s, bytes): - return s.decode('utf-8', 'replace' if replace else 'strict') - return s - -else: - def console_to_str(s): - return s - - def native_str(s, replace=False): - # Replace is ignored -- unicode to UTF-8 can't fail - if isinstance(s, text_type): - return s.encode('utf-8') - return s - - -def total_seconds(td): - if hasattr(td, "total_seconds"): - return td.total_seconds() - else: - val = td.microseconds + (td.seconds + td.days * 24 * 3600) * 10 ** 6 - return val / 10 ** 6 - - -def get_path_uid(path): - """ - Return path's uid. - - Does not follow symlinks: - https://github.com/pypa/pip/pull/935#discussion_r5307003 - - Placed this function in compat due to differences on AIX and - Jython, that should eventually go away. - - :raises OSError: When path is a symlink or can't be read. - """ - if hasattr(os, 'O_NOFOLLOW'): - fd = os.open(path, os.O_RDONLY | os.O_NOFOLLOW) - file_uid = os.fstat(fd).st_uid - os.close(fd) - else: # AIX and Jython - # WARNING: time of check vulnerability, but best we can do w/o NOFOLLOW - if not os.path.islink(path): - # older versions of Jython don't have `os.fstat` - file_uid = os.stat(path).st_uid - else: - # raise OSError for parity with os.O_NOFOLLOW above - raise OSError( - "%s is a symlink; Will not return uid for symlinks" % path - ) - return file_uid - - -def expanduser(path): - """ - Expand ~ and ~user constructions. - - Includes a workaround for http://bugs.python.org/issue14768 - """ - expanded = os.path.expanduser(path) - if path.startswith('~/') and expanded.startswith('//'): - expanded = expanded[1:] - return expanded - - -# packages in the stdlib that may have installation metadata, but should not be -# considered 'installed'. 
this theoretically could be determined based on -# dist.location (py27:`sysconfig.get_paths()['stdlib']`, -# py26:sysconfig.get_config_vars('LIBDEST')), but fear platform variation may -# make this ineffective, so hard-coding -stdlib_pkgs = ('python', 'wsgiref') -if sys.version_info >= (2, 7): - stdlib_pkgs += ('argparse',) - - -# windows detection, covers cpython and ironpython -WINDOWS = (sys.platform.startswith("win") or - (sys.platform == 'cli' and os.name == 'nt')) - - -def samefile(file1, file2): - """Provide an alternative for os.path.samefile on Windows/Python2""" - if hasattr(os.path, 'samefile'): - return os.path.samefile(file1, file2) - else: - path1 = os.path.normcase(os.path.abspath(file1)) - path2 = os.path.normcase(os.path.abspath(file2)) - return path1 == path2 diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/compat/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/compat/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index 990d5e3..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/compat/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/compat/__pycache__/dictconfig.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/compat/__pycache__/dictconfig.cpython-36.pyc deleted file mode 100644 index 76b2b41..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/compat/__pycache__/dictconfig.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/compat/dictconfig.py b/classifier/myenv/lib/python3.6/site-packages/pip/compat/dictconfig.py deleted file mode 100644 index ec684aa..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/compat/dictconfig.py +++ /dev/null @@ -1,565 +0,0 @@ -# This is a copy of the Python logging.config.dictconfig module, -# reproduced with permission. It is provided here for backwards -# compatibility for Python versions prior to 2.7. -# -# Copyright 2009-2010 by Vinay Sajip. All Rights Reserved. -# -# Permission to use, copy, modify, and distribute this software and its -# documentation for any purpose and without fee is hereby granted, -# provided that the above copyright notice appear in all copies and that -# both that copyright notice and this permission notice appear in -# supporting documentation, and that the name of Vinay Sajip -# not be used in advertising or publicity pertaining to distribution -# of the software without specific, written prior permission. -# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING -# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL -# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR -# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER -# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT -# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
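The backport that follows mirrors the standard logging.config.dictConfig API, so a configuration written for one works with the other. A minimal sketch using the stdlib entry point ('version': 1 is the only schema version either implementation accepts):

    import logging
    import logging.config

    logging.config.dictConfig({
        'version': 1,
        'formatters': {
            'plain': {'format': '%(levelname)s %(name)s: %(message)s'},
        },
        'handlers': {
            'console': {
                'class': 'logging.StreamHandler',  # resolved by name
                'formatter': 'plain',
                'level': 'DEBUG',
            },
        },
        'root': {'level': 'INFO', 'handlers': ['console']},
    })

    logging.getLogger('demo').info('configured via dictConfig')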
-from __future__ import absolute_import - -import logging.handlers -import re -import sys -import types - -from pip._vendor import six - -# flake8: noqa - -IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) - - -def valid_ident(s): - m = IDENTIFIER.match(s) - if not m: - raise ValueError('Not a valid Python identifier: %r' % s) - return True - -# -# This function is defined in logging only in recent versions of Python -# -try: - from logging import _checkLevel -except ImportError: - def _checkLevel(level): - if isinstance(level, int): - rv = level - elif str(level) == level: - if level not in logging._levelNames: - raise ValueError('Unknown level: %r' % level) - rv = logging._levelNames[level] - else: - raise TypeError('Level not an integer or a ' - 'valid string: %r' % level) - return rv - -# The ConvertingXXX classes are wrappers around standard Python containers, -# and they serve to convert any suitable values in the container. The -# conversion converts base dicts, lists and tuples to their wrapped -# equivalents, whereas strings which match a conversion format are converted -# appropriately. -# -# Each wrapper should have a configurator attribute holding the actual -# configurator to use for conversion. - - -class ConvertingDict(dict): - """A converting dictionary wrapper.""" - - def __getitem__(self, key): - value = dict.__getitem__(self, key) - result = self.configurator.convert(value) - # If the converted value is different, save for next time - if value is not result: - self[key] = result - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - def get(self, key, default=None): - value = dict.get(self, key, default) - result = self.configurator.convert(value) - # If the converted value is different, save for next time - if value is not result: - self[key] = result - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - def pop(self, key, default=None): - value = dict.pop(self, key, default) - result = self.configurator.convert(value) - if value is not result: - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - -class ConvertingList(list): - """A converting list wrapper.""" - def __getitem__(self, key): - value = list.__getitem__(self, key) - result = self.configurator.convert(value) - # If the converted value is different, save for next time - if value is not result: - self[key] = result - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - def pop(self, idx=-1): - value = list.pop(self, idx) - result = self.configurator.convert(value) - if value is not result: - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - return result - - -class ConvertingTuple(tuple): - """A converting tuple wrapper.""" - def __getitem__(self, key): - value = tuple.__getitem__(self, key) - result = self.configurator.convert(value) - if value is not result: - if type(result) in (ConvertingDict, ConvertingList, - ConvertingTuple): - result.parent = self - result.key = key - return result - - -class BaseConfigurator(object): - """ - The configurator base class which defines some useful defaults. 
- """ - - CONVERT_PATTERN = re.compile(r'^(?P[a-z]+)://(?P.*)$') - - WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') - DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') - INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*') - DIGIT_PATTERN = re.compile(r'^\d+$') - - value_converters = { - 'ext' : 'ext_convert', - 'cfg' : 'cfg_convert', - } - - # We might want to use a different one, e.g. importlib - importer = __import__ - - def __init__(self, config): - self.config = ConvertingDict(config) - self.config.configurator = self - - def resolve(self, s): - """ - Resolve strings to objects using standard import and attribute - syntax. - """ - name = s.split('.') - used = name.pop(0) - try: - found = self.importer(used) - for frag in name: - used += '.' + frag - try: - found = getattr(found, frag) - except AttributeError: - self.importer(used) - found = getattr(found, frag) - return found - except ImportError: - e, tb = sys.exc_info()[1:] - v = ValueError('Cannot resolve %r: %s' % (s, e)) - v.__cause__, v.__traceback__ = e, tb - raise v - - def ext_convert(self, value): - """Default converter for the ext:// protocol.""" - return self.resolve(value) - - def cfg_convert(self, value): - """Default converter for the cfg:// protocol.""" - rest = value - m = self.WORD_PATTERN.match(rest) - if m is None: - raise ValueError("Unable to convert %r" % value) - else: - rest = rest[m.end():] - d = self.config[m.groups()[0]] - # print d, rest - while rest: - m = self.DOT_PATTERN.match(rest) - if m: - d = d[m.groups()[0]] - else: - m = self.INDEX_PATTERN.match(rest) - if m: - idx = m.groups()[0] - if not self.DIGIT_PATTERN.match(idx): - d = d[idx] - else: - try: - n = int(idx) # try as number first (most likely) - d = d[n] - except TypeError: - d = d[idx] - if m: - rest = rest[m.end():] - else: - raise ValueError('Unable to convert ' - '%r at %r' % (value, rest)) - # rest should be empty - return d - - def convert(self, value): - """ - Convert values to an appropriate type. dicts, lists and tuples are - replaced by their converting alternatives. Strings are checked to - see if they have a conversion format and are converted if they do. 
- """ - if not isinstance(value, ConvertingDict) and isinstance(value, dict): - value = ConvertingDict(value) - value.configurator = self - elif not isinstance(value, ConvertingList) and isinstance(value, list): - value = ConvertingList(value) - value.configurator = self - elif not isinstance(value, ConvertingTuple) and\ - isinstance(value, tuple): - value = ConvertingTuple(value) - value.configurator = self - elif isinstance(value, six.string_types): # str for py3k - m = self.CONVERT_PATTERN.match(value) - if m: - d = m.groupdict() - prefix = d['prefix'] - converter = self.value_converters.get(prefix, None) - if converter: - suffix = d['suffix'] - converter = getattr(self, converter) - value = converter(suffix) - return value - - def configure_custom(self, config): - """Configure an object with a user-supplied factory.""" - c = config.pop('()') - if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType: - c = self.resolve(c) - props = config.pop('.', None) - # Check for valid identifiers - kwargs = dict((k, config[k]) for k in config if valid_ident(k)) - result = c(**kwargs) - if props: - for name, value in props.items(): - setattr(result, name, value) - return result - - def as_tuple(self, value): - """Utility function which converts lists to tuples.""" - if isinstance(value, list): - value = tuple(value) - return value - - -class DictConfigurator(BaseConfigurator): - """ - Configure logging using a dictionary-like object to describe the - configuration. - """ - - def configure(self): - """Do the configuration.""" - - config = self.config - if 'version' not in config: - raise ValueError("dictionary doesn't specify a version") - if config['version'] != 1: - raise ValueError("Unsupported version: %s" % config['version']) - incremental = config.pop('incremental', False) - EMPTY_DICT = {} - logging._acquireLock() - try: - if incremental: - handlers = config.get('handlers', EMPTY_DICT) - # incremental handler config only if handler name - # ties in to logging._handlers (Python 2.7) - if sys.version_info[:2] == (2, 7): - for name in handlers: - if name not in logging._handlers: - raise ValueError('No handler found with ' - 'name %r' % name) - else: - try: - handler = logging._handlers[name] - handler_config = handlers[name] - level = handler_config.get('level', None) - if level: - handler.setLevel(_checkLevel(level)) - except StandardError as e: - raise ValueError('Unable to configure handler ' - '%r: %s' % (name, e)) - loggers = config.get('loggers', EMPTY_DICT) - for name in loggers: - try: - self.configure_logger(name, loggers[name], True) - except StandardError as e: - raise ValueError('Unable to configure logger ' - '%r: %s' % (name, e)) - root = config.get('root', None) - if root: - try: - self.configure_root(root, True) - except StandardError as e: - raise ValueError('Unable to configure root ' - 'logger: %s' % e) - else: - disable_existing = config.pop('disable_existing_loggers', True) - - logging._handlers.clear() - del logging._handlerList[:] - - # Do formatters first - they don't refer to anything else - formatters = config.get('formatters', EMPTY_DICT) - for name in formatters: - try: - formatters[name] = self.configure_formatter( - formatters[name]) - except StandardError as e: - raise ValueError('Unable to configure ' - 'formatter %r: %s' % (name, e)) - # Next, do filters - they don't refer to anything else, either - filters = config.get('filters', EMPTY_DICT) - for name in filters: - try: - filters[name] = self.configure_filter(filters[name]) 
- except StandardError as e: - raise ValueError('Unable to configure ' - 'filter %r: %s' % (name, e)) - - # Next, do handlers - they refer to formatters and filters - # As handlers can refer to other handlers, sort the keys - # to allow a deterministic order of configuration - handlers = config.get('handlers', EMPTY_DICT) - for name in sorted(handlers): - try: - handler = self.configure_handler(handlers[name]) - handler.name = name - handlers[name] = handler - except StandardError as e: - raise ValueError('Unable to configure handler ' - '%r: %s' % (name, e)) - # Next, do loggers - they refer to handlers and filters - - # we don't want to lose the existing loggers, - # since other threads may have pointers to them. - # existing is set to contain all existing loggers, - # and as we go through the new configuration we - # remove any which are configured. At the end, - # what's left in existing is the set of loggers - # which were in the previous configuration but - # which are not in the new configuration. - root = logging.root - existing = list(root.manager.loggerDict) - # The list needs to be sorted so that we can - # avoid disabling child loggers of explicitly - # named loggers. With a sorted list it is easier - # to find the child loggers. - existing.sort() - # We'll keep the list of existing loggers - # which are children of named loggers here... - child_loggers = [] - # now set up the new ones... - loggers = config.get('loggers', EMPTY_DICT) - for name in loggers: - if name in existing: - i = existing.index(name) - prefixed = name + "." - pflen = len(prefixed) - num_existing = len(existing) - i = i + 1 # look at the entry after name - while (i < num_existing) and\ - (existing[i][:pflen] == prefixed): - child_loggers.append(existing[i]) - i = i + 1 - existing.remove(name) - try: - self.configure_logger(name, loggers[name]) - except StandardError as e: - raise ValueError('Unable to configure logger ' - '%r: %s' % (name, e)) - - # Disable any old loggers. There's no point deleting - # them as other threads may continue to hold references - # and by disabling them, you stop them doing any logging. - # However, don't disable children of named loggers, as that's - # probably not what was intended by the user. - for log in existing: - logger = root.manager.loggerDict[log] - if log in child_loggers: - logger.level = logging.NOTSET - logger.handlers = [] - logger.propagate = True - elif disable_existing: - logger.disabled = True - - # And finally, do the root logger - root = config.get('root', None) - if root: - try: - self.configure_root(root) - except StandardError as e: - raise ValueError('Unable to configure root ' - 'logger: %s' % e) - finally: - logging._releaseLock() - - def configure_formatter(self, config): - """Configure a formatter from a dictionary.""" - if '()' in config: - factory = config['()'] # for use in exception handler - try: - result = self.configure_custom(config) - except TypeError as te: - if "'format'" not in str(te): - raise - # Name of parameter changed from fmt to format. - # Retry with old name. - # This is so that code can be used with older Python versions - #(e.g. 
by Django) - config['fmt'] = config.pop('format') - config['()'] = factory - result = self.configure_custom(config) - else: - fmt = config.get('format', None) - dfmt = config.get('datefmt', None) - result = logging.Formatter(fmt, dfmt) - return result - - def configure_filter(self, config): - """Configure a filter from a dictionary.""" - if '()' in config: - result = self.configure_custom(config) - else: - name = config.get('name', '') - result = logging.Filter(name) - return result - - def add_filters(self, filterer, filters): - """Add filters to a filterer from a list of names.""" - for f in filters: - try: - filterer.addFilter(self.config['filters'][f]) - except StandardError as e: - raise ValueError('Unable to add filter %r: %s' % (f, e)) - - def configure_handler(self, config): - """Configure a handler from a dictionary.""" - formatter = config.pop('formatter', None) - if formatter: - try: - formatter = self.config['formatters'][formatter] - except StandardError as e: - raise ValueError('Unable to set formatter ' - '%r: %s' % (formatter, e)) - level = config.pop('level', None) - filters = config.pop('filters', None) - if '()' in config: - c = config.pop('()') - if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType: - c = self.resolve(c) - factory = c - else: - klass = self.resolve(config.pop('class')) - # Special case for handler which refers to another handler - if issubclass(klass, logging.handlers.MemoryHandler) and\ - 'target' in config: - try: - config['target'] = self.config['handlers'][config['target']] - except StandardError as e: - raise ValueError('Unable to set target handler ' - '%r: %s' % (config['target'], e)) - elif issubclass(klass, logging.handlers.SMTPHandler) and\ - 'mailhost' in config: - config['mailhost'] = self.as_tuple(config['mailhost']) - elif issubclass(klass, logging.handlers.SysLogHandler) and\ - 'address' in config: - config['address'] = self.as_tuple(config['address']) - factory = klass - kwargs = dict((k, config[k]) for k in config if valid_ident(k)) - try: - result = factory(**kwargs) - except TypeError as te: - if "'stream'" not in str(te): - raise - # The argument name changed from strm to stream - # Retry with old name. - # This is so that code can be used with older Python versions - #(e.g. by Django) - kwargs['strm'] = kwargs.pop('stream') - result = factory(**kwargs) - if formatter: - result.setFormatter(formatter) - if level is not None: - result.setLevel(_checkLevel(level)) - if filters: - self.add_filters(result, filters) - return result - - def add_handlers(self, logger, handlers): - """Add handlers to a logger from a list of names.""" - for h in handlers: - try: - logger.addHandler(self.config['handlers'][h]) - except StandardError as e: - raise ValueError('Unable to add handler %r: %s' % (h, e)) - - def common_logger_config(self, logger, config, incremental=False): - """ - Perform configuration which is common to root and non-root loggers. 
- """ - level = config.get('level', None) - if level is not None: - logger.setLevel(_checkLevel(level)) - if not incremental: - # Remove any existing handlers - for h in logger.handlers[:]: - logger.removeHandler(h) - handlers = config.get('handlers', None) - if handlers: - self.add_handlers(logger, handlers) - filters = config.get('filters', None) - if filters: - self.add_filters(logger, filters) - - def configure_logger(self, name, config, incremental=False): - """Configure a non-root logger from a dictionary.""" - logger = logging.getLogger(name) - self.common_logger_config(logger, config, incremental) - propagate = config.get('propagate', None) - if propagate is not None: - logger.propagate = propagate - - def configure_root(self, config, incremental=False): - """Configure a root logger from a dictionary.""" - root = logging.getLogger() - self.common_logger_config(root, config, incremental) - -dictConfigClass = DictConfigurator - - -def dictConfig(config): - """Configure logging using a dictionary.""" - dictConfigClass(config).configure() diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/download.py b/classifier/myenv/lib/python3.6/site-packages/pip/download.py deleted file mode 100644 index 54d3131..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/download.py +++ /dev/null @@ -1,906 +0,0 @@ -from __future__ import absolute_import - -import cgi -import email.utils -import getpass -import json -import logging -import mimetypes -import os -import platform -import re -import shutil -import sys -import tempfile - -try: - import ssl # noqa - HAS_TLS = True -except ImportError: - HAS_TLS = False - -from pip._vendor.six.moves.urllib import parse as urllib_parse -from pip._vendor.six.moves.urllib import request as urllib_request - -import pip - -from pip.exceptions import InstallationError, HashMismatch -from pip.models import PyPI -from pip.utils import (splitext, rmtree, format_size, display_path, - backup_dir, ask_path_exists, unpack_file, - ARCHIVE_EXTENSIONS, consume, call_subprocess) -from pip.utils.encoding import auto_decode -from pip.utils.filesystem import check_path_owner -from pip.utils.logging import indent_log -from pip.utils.setuptools_build import SETUPTOOLS_SHIM -from pip.utils.glibc import libc_ver -from pip.utils.ui import DownloadProgressBar, DownloadProgressSpinner -from pip.locations import write_delete_marker_file -from pip.vcs import vcs -from pip._vendor import requests, six -from pip._vendor.requests.adapters import BaseAdapter, HTTPAdapter -from pip._vendor.requests.auth import AuthBase, HTTPBasicAuth -from pip._vendor.requests.models import CONTENT_CHUNK_SIZE, Response -from pip._vendor.requests.utils import get_netrc_auth -from pip._vendor.requests.structures import CaseInsensitiveDict -from pip._vendor.requests.packages import urllib3 -from pip._vendor.cachecontrol import CacheControlAdapter -from pip._vendor.cachecontrol.caches import FileCache -from pip._vendor.lockfile import LockError -from pip._vendor.six.moves import xmlrpc_client - - -__all__ = ['get_file_content', - 'is_url', 'url_to_path', 'path_to_url', - 'is_archive_file', 'unpack_vcs_link', - 'unpack_file_url', 'is_vcs_url', 'is_file_url', - 'unpack_http_url', 'unpack_url'] - - -logger = logging.getLogger(__name__) - - -def user_agent(): - """ - Return a string representing the user agent. 
- """ - data = { - "installer": {"name": "pip", "version": pip.__version__}, - "python": platform.python_version(), - "implementation": { - "name": platform.python_implementation(), - }, - } - - if data["implementation"]["name"] == 'CPython': - data["implementation"]["version"] = platform.python_version() - elif data["implementation"]["name"] == 'PyPy': - if sys.pypy_version_info.releaselevel == 'final': - pypy_version_info = sys.pypy_version_info[:3] - else: - pypy_version_info = sys.pypy_version_info - data["implementation"]["version"] = ".".join( - [str(x) for x in pypy_version_info] - ) - elif data["implementation"]["name"] == 'Jython': - # Complete Guess - data["implementation"]["version"] = platform.python_version() - elif data["implementation"]["name"] == 'IronPython': - # Complete Guess - data["implementation"]["version"] = platform.python_version() - - if sys.platform.startswith("linux"): - from pip._vendor import distro - distro_infos = dict(filter( - lambda x: x[1], - zip(["name", "version", "id"], distro.linux_distribution()), - )) - libc = dict(filter( - lambda x: x[1], - zip(["lib", "version"], libc_ver()), - )) - if libc: - distro_infos["libc"] = libc - if distro_infos: - data["distro"] = distro_infos - - if sys.platform.startswith("darwin") and platform.mac_ver()[0]: - data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]} - - if platform.system(): - data.setdefault("system", {})["name"] = platform.system() - - if platform.release(): - data.setdefault("system", {})["release"] = platform.release() - - if platform.machine(): - data["cpu"] = platform.machine() - - # Python 2.6 doesn't have ssl.OPENSSL_VERSION. - if HAS_TLS and sys.version_info[:2] > (2, 6): - data["openssl_version"] = ssl.OPENSSL_VERSION - - return "{data[installer][name]}/{data[installer][version]} {json}".format( - data=data, - json=json.dumps(data, separators=(",", ":"), sort_keys=True), - ) - - -class MultiDomainBasicAuth(AuthBase): - - def __init__(self, prompting=True): - self.prompting = prompting - self.passwords = {} - - def __call__(self, req): - parsed = urllib_parse.urlparse(req.url) - - # Get the netloc without any embedded credentials - netloc = parsed.netloc.rsplit("@", 1)[-1] - - # Set the url of the request to the url without any credentials - req.url = urllib_parse.urlunparse(parsed[:1] + (netloc,) + parsed[2:]) - - # Use any stored credentials that we have for this netloc - username, password = self.passwords.get(netloc, (None, None)) - - # Extract credentials embedded in the url if we have none stored - if username is None: - username, password = self.parse_credentials(parsed.netloc) - - # Get creds from netrc if we still don't have them - if username is None and password is None: - netrc_auth = get_netrc_auth(req.url) - username, password = netrc_auth if netrc_auth else (None, None) - - if username or password: - # Store the username and password - self.passwords[netloc] = (username, password) - - # Send the basic auth with this request - req = HTTPBasicAuth(username or "", password or "")(req) - - # Attach a hook to handle 401 responses - req.register_hook("response", self.handle_401) - - return req - - def handle_401(self, resp, **kwargs): - # We only care about 401 responses, anything else we want to just - # pass through the actual response - if resp.status_code != 401: - return resp - - # We are not able to prompt the user so simply return the response - if not self.prompting: - return resp - - parsed = urllib_parse.urlparse(resp.url) - - # Prompt the user for a new 
username and password - username = six.moves.input("User for %s: " % parsed.netloc) - password = getpass.getpass("Password: ") - - # Store the new username and password to use for future requests - if username or password: - self.passwords[parsed.netloc] = (username, password) - - # Consume content and release the original connection to allow our new - # request to reuse the same one. - resp.content - resp.raw.release_conn() - - # Add our new username and password to the request - req = HTTPBasicAuth(username or "", password or "")(resp.request) - - # Send our new request - new_resp = resp.connection.send(req, **kwargs) - new_resp.history.append(resp) - - return new_resp - - def parse_credentials(self, netloc): - if "@" in netloc: - userinfo = netloc.rsplit("@", 1)[0] - if ":" in userinfo: - return userinfo.split(":", 1) - return userinfo, None - return None, None - - -class LocalFSAdapter(BaseAdapter): - - def send(self, request, stream=None, timeout=None, verify=None, cert=None, - proxies=None): - pathname = url_to_path(request.url) - - resp = Response() - resp.status_code = 200 - resp.url = request.url - - try: - stats = os.stat(pathname) - except OSError as exc: - resp.status_code = 404 - resp.raw = exc - else: - modified = email.utils.formatdate(stats.st_mtime, usegmt=True) - content_type = mimetypes.guess_type(pathname)[0] or "text/plain" - resp.headers = CaseInsensitiveDict({ - "Content-Type": content_type, - "Content-Length": stats.st_size, - "Last-Modified": modified, - }) - - resp.raw = open(pathname, "rb") - resp.close = resp.raw.close - - return resp - - def close(self): - pass - - -class SafeFileCache(FileCache): - """ - A file based cache which is safe to use even when the target directory may - not be accessible or writable. - """ - - def __init__(self, *args, **kwargs): - super(SafeFileCache, self).__init__(*args, **kwargs) - - # Check to ensure that the directory containing our cache directory - # is owned by the user current executing pip. If it does not exist - # we will check the parent directory until we find one that does exist. - # If it is not owned by the user executing pip then we will disable - # the cache and log a warning. - if not check_path_owner(self.directory): - logger.warning( - "The directory '%s' or its parent directory is not owned by " - "the current user and the cache has been disabled. Please " - "check the permissions and owner of that directory. If " - "executing pip with sudo, you may want sudo's -H flag.", - self.directory, - ) - - # Set our directory to None to disable the Cache - self.directory = None - - def get(self, *args, **kwargs): - # If we don't have a directory, then the cache should be a no-op. - if self.directory is None: - return - - try: - return super(SafeFileCache, self).get(*args, **kwargs) - except (LockError, OSError, IOError): - # We intentionally silence this error, if we can't access the cache - # then we can just skip caching and process the request as if - # caching wasn't enabled. - pass - - def set(self, *args, **kwargs): - # If we don't have a directory, then the cache should be a no-op. - if self.directory is None: - return - - try: - return super(SafeFileCache, self).set(*args, **kwargs) - except (LockError, OSError, IOError): - # We intentionally silence this error, if we can't access the cache - # then we can just skip caching and process the request as if - # caching wasn't enabled. - pass - - def delete(self, *args, **kwargs): - # If we don't have a directory, then the cache should be a no-op. 
- if self.directory is None: - return - - try: - return super(SafeFileCache, self).delete(*args, **kwargs) - except (LockError, OSError, IOError): - # We intentionally silence this error, if we can't access the cache - # then we can just skip caching and process the request as if - # caching wasn't enabled. - pass - - -class InsecureHTTPAdapter(HTTPAdapter): - - def cert_verify(self, conn, url, verify, cert): - conn.cert_reqs = 'CERT_NONE' - conn.ca_certs = None - - -class PipSession(requests.Session): - - timeout = None - - def __init__(self, *args, **kwargs): - retries = kwargs.pop("retries", 0) - cache = kwargs.pop("cache", None) - insecure_hosts = kwargs.pop("insecure_hosts", []) - - super(PipSession, self).__init__(*args, **kwargs) - - # Attach our User Agent to the request - self.headers["User-Agent"] = user_agent() - - # Attach our Authentication handler to the session - self.auth = MultiDomainBasicAuth() - - # Create our urllib3.Retry instance which will allow us to customize - # how we handle retries. - retries = urllib3.Retry( - # Set the total number of retries that a particular request can - # have. - total=retries, - - # A 503 error from PyPI typically means that the Fastly -> Origin - # connection got interrupted in some way. A 503 error in general - # is typically considered a transient error so we'll go ahead and - # retry it. - status_forcelist=[503], - - # Add a small amount of back off between failed requests in - # order to prevent hammering the service. - backoff_factor=0.25, - ) - - # We want to _only_ cache responses on securely fetched origins. We do - # this because we can't validate the response of an insecurely fetched - # origin, and we don't want someone to be able to poison the cache and - # require manual eviction from the cache to fix it. - if cache: - secure_adapter = CacheControlAdapter( - cache=SafeFileCache(cache, use_dir_lock=True), - max_retries=retries, - ) - else: - secure_adapter = HTTPAdapter(max_retries=retries) - - # Our Insecure HTTPAdapter disables HTTPS validation. It does not - # support caching (see above) so we'll use it for all http:// URLs as - # well as any https:// host that we've marked as ignoring TLS errors - # for. - insecure_adapter = InsecureHTTPAdapter(max_retries=retries) - - self.mount("https://", secure_adapter) - self.mount("http://", insecure_adapter) - - # Enable file:// urls - self.mount("file://", LocalFSAdapter()) - - # We want to use a non-validating adapter for any requests which are - # deemed insecure. - for host in insecure_hosts: - self.mount("https://{0}/".format(host), insecure_adapter) - - def request(self, method, url, *args, **kwargs): - # Allow setting a default timeout on a session - kwargs.setdefault("timeout", self.timeout) - - # Dispatch the actual request - return super(PipSession, self).request(method, url, *args, **kwargs) - - -def get_file_content(url, comes_from=None, session=None): - """Gets the content of a file; it may be a filename, file: URL, or - http: URL. Returns (location, content). 
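
How the pieces of PipSession above fit together in practice — a minimal sketch; the cache path and internal host name are hypothetical:

    session = PipSession(
        retries=3,                             # becomes a urllib3.Retry with backoff
        cache='/tmp/pip-http-cache',           # enables the caching adapter for https://
        insecure_hosts=['pypi.internal.lan'],  # TLS validation skipped for this host only
    )
    session.timeout = 15  # seconds; request() applies it as the per-request default
    response = session.get('https://pypi.python.org/simple/pip/')
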
Content is unicode.""" - if session is None: - raise TypeError( - "get_file_content() missing 1 required keyword argument: 'session'" - ) - - match = _scheme_re.search(url) - if match: - scheme = match.group(1).lower() - if (scheme == 'file' and comes_from and - comes_from.startswith('http')): - raise InstallationError( - 'Requirements file %s references URL %s, which is local' - % (comes_from, url)) - if scheme == 'file': - path = url.split(':', 1)[1] - path = path.replace('\\', '/') - match = _url_slash_drive_re.match(path) - if match: - path = match.group(1) + ':' + path.split('|', 1)[1] - path = urllib_parse.unquote(path) - if path.startswith('/'): - path = '/' + path.lstrip('/') - url = path - else: - # FIXME: catch some errors - resp = session.get(url) - resp.raise_for_status() - return resp.url, resp.text - try: - with open(url, 'rb') as f: - content = auto_decode(f.read()) - except IOError as exc: - raise InstallationError( - 'Could not open requirements file: %s' % str(exc) - ) - return url, content - - -_scheme_re = re.compile(r'^(http|https|file):', re.I) -_url_slash_drive_re = re.compile(r'/*([a-z])\|', re.I) - - -def is_url(name): - """Returns true if the name looks like a URL""" - if ':' not in name: - return False - scheme = name.split(':', 1)[0].lower() - return scheme in ['http', 'https', 'file', 'ftp'] + vcs.all_schemes - - -def url_to_path(url): - """ - Convert a file: URL to a path. - """ - assert url.startswith('file:'), ( - "You can only turn file: urls into filenames (not %r)" % url) - - _, netloc, path, _, _ = urllib_parse.urlsplit(url) - - # if we have a UNC path, prepend UNC share notation - if netloc: - netloc = '\\\\' + netloc - - path = urllib_request.url2pathname(netloc + path) - return path - - -def path_to_url(path): - """ - Convert a path to a file: URL. The path will be made absolute and have - quoted path parts. - """ - path = os.path.normpath(os.path.abspath(path)) - url = urllib_parse.urljoin('file:', urllib_request.pathname2url(path)) - return url - - -def is_archive_file(name): - """Return True if `name` is a considered as an archive file.""" - ext = splitext(name)[1].lower() - if ext in ARCHIVE_EXTENSIONS: - return True - return False - - -def unpack_vcs_link(link, location): - vcs_backend = _get_used_vcs_backend(link) - vcs_backend.unpack(location) - - -def _get_used_vcs_backend(link): - for backend in vcs.backends: - if link.scheme in backend.schemes: - vcs_backend = backend(link.url) - return vcs_backend - - -def is_vcs_url(link): - return bool(_get_used_vcs_backend(link)) - - -def is_file_url(link): - return link.url.lower().startswith('file:') - - -def is_dir_url(link): - """Return whether a file:// Link points to a directory. - - ``link`` must not have any other scheme but file://. Call is_file_url() - first. 
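
path_to_url() and url_to_path() above are inverses for absolute POSIX paths — a small sketch with a hypothetical file:

    url = path_to_url('/tmp/reqs/requirements.txt')
    # 'file:///tmp/reqs/requirements.txt'
    path = url_to_path(url)
    # '/tmp/reqs/requirements.txt'
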
- - """ - link_path = url_to_path(link.url_without_fragment) - return os.path.isdir(link_path) - - -def _progress_indicator(iterable, *args, **kwargs): - return iterable - - -def _download_url(resp, link, content_file, hashes): - try: - total_length = int(resp.headers['content-length']) - except (ValueError, KeyError, TypeError): - total_length = 0 - - cached_resp = getattr(resp, "from_cache", False) - - if logger.getEffectiveLevel() > logging.INFO: - show_progress = False - elif cached_resp: - show_progress = False - elif total_length > (40 * 1000): - show_progress = True - elif not total_length: - show_progress = True - else: - show_progress = False - - show_url = link.show_url - - def resp_read(chunk_size): - try: - # Special case for urllib3. - for chunk in resp.raw.stream( - chunk_size, - # We use decode_content=False here because we don't - # want urllib3 to mess with the raw bytes we get - # from the server. If we decompress inside of - # urllib3 then we cannot verify the checksum - # because the checksum will be of the compressed - # file. This breakage will only occur if the - # server adds a Content-Encoding header, which - # depends on how the server was configured: - # - Some servers will notice that the file isn't a - # compressible file and will leave the file alone - # and with an empty Content-Encoding - # - Some servers will notice that the file is - # already compressed and will leave the file - # alone and will add a Content-Encoding: gzip - # header - # - Some servers won't notice anything at all and - # will take a file that's already been compressed - # and compress it again and set the - # Content-Encoding: gzip header - # - # By setting this not to decode automatically we - # hope to eliminate problems with the second case. - decode_content=False): - yield chunk - except AttributeError: - # Standard file-like object. - while True: - chunk = resp.raw.read(chunk_size) - if not chunk: - break - yield chunk - - def written_chunks(chunks): - for chunk in chunks: - content_file.write(chunk) - yield chunk - - progress_indicator = _progress_indicator - - if link.netloc == PyPI.netloc: - url = show_url - else: - url = link.url_without_fragment - - if show_progress: # We don't show progress on cached responses - if total_length: - logger.info("Downloading %s (%s)", url, format_size(total_length)) - progress_indicator = DownloadProgressBar(max=total_length).iter - else: - logger.info("Downloading %s", url) - progress_indicator = DownloadProgressSpinner().iter - elif cached_resp: - logger.info("Using cached %s", url) - else: - logger.info("Downloading %s", url) - - logger.debug('Downloading from URL %s', link) - - downloaded_chunks = written_chunks( - progress_indicator( - resp_read(CONTENT_CHUNK_SIZE), - CONTENT_CHUNK_SIZE - ) - ) - if hashes: - hashes.check_against_chunks(downloaded_chunks) - else: - consume(downloaded_chunks) - - -def _copy_file(filename, location, link): - copy = True - download_location = os.path.join(location, link.filename) - if os.path.exists(download_location): - response = ask_path_exists( - 'The file %s exists. 
(i)gnore, (w)ipe, (b)ackup, (a)bort' %
-            display_path(download_location), ('i', 'w', 'b', 'a'))
-        if response == 'i':
-            copy = False
-        elif response == 'w':
-            logger.warning('Deleting %s', display_path(download_location))
-            os.remove(download_location)
-        elif response == 'b':
-            dest_file = backup_dir(download_location)
-            logger.warning(
-                'Backing up %s to %s',
-                display_path(download_location),
-                display_path(dest_file),
-            )
-            shutil.move(download_location, dest_file)
-        elif response == 'a':
-            sys.exit(-1)
-    if copy:
-        shutil.copy(filename, download_location)
-        logger.info('Saved %s', display_path(download_location))
-
-
-def unpack_http_url(link, location, download_dir=None,
-                    session=None, hashes=None):
-    if session is None:
-        raise TypeError(
-            "unpack_http_url() missing 1 required keyword argument: 'session'"
-        )
-
-    temp_dir = tempfile.mkdtemp('-unpack', 'pip-')
-
-    # If a download dir is specified, is the file already downloaded there?
-    already_downloaded_path = None
-    if download_dir:
-        already_downloaded_path = _check_download_dir(link,
-                                                      download_dir,
-                                                      hashes)
-
-    if already_downloaded_path:
-        from_path = already_downloaded_path
-        content_type = mimetypes.guess_type(from_path)[0]
-    else:
-        # let's download to a tmp dir
-        from_path, content_type = _download_http_url(link,
-                                                     session,
-                                                     temp_dir,
-                                                     hashes)
-
-    # unpack the archive to the build dir location. even when only downloading
-    # archives, they have to be unpacked to parse dependencies
-    unpack_file(from_path, location, content_type, link)
-
-    # a download dir is specified; let's copy the archive there
-    if download_dir and not already_downloaded_path:
-        _copy_file(from_path, download_dir, link)
-
-    if not already_downloaded_path:
-        os.unlink(from_path)
-        rmtree(temp_dir)
-
-
-def unpack_file_url(link, location, download_dir=None, hashes=None):
-    """Unpack link into location.
-
-    If download_dir is provided and link points to a file, make a copy
-    of the link file inside download_dir.
-    """
-    link_path = url_to_path(link.url_without_fragment)
-
-    # If it's a url to a local directory
-    if is_dir_url(link):
-        if os.path.isdir(location):
-            rmtree(location)
-        shutil.copytree(link_path, location, symlinks=True)
-        if download_dir:
-            logger.info('Link is a directory, ignoring download_dir')
-        return
-
-    # If --require-hashes is off, `hashes` is either empty, the
-    # link's embedded hash, or MissingHashes; it is required to
-    # match. If --require-hashes is on, we are satisfied by any
-    # hash in `hashes` matching: a URL-based or an option-based
-    # one; no internet-sourced hash will be in `hashes`.
-    if hashes:
-        hashes.check_against_path(link_path)
-
-    # If a download dir is specified, is the file already there and valid?
-    already_downloaded_path = None
-    if download_dir:
-        already_downloaded_path = _check_download_dir(link,
-                                                      download_dir,
-                                                      hashes)
-
-    if already_downloaded_path:
-        from_path = already_downloaded_path
-    else:
-        from_path = link_path
-
-    content_type = mimetypes.guess_type(from_path)[0]
-
-    # unpack the archive to the build dir location. even when only downloading
-    # archives, they have to be unpacked to parse dependencies
-    unpack_file(from_path, location, content_type, link)
-
-    # a download dir is specified and not already downloaded
-    if download_dir and not already_downloaded_path:
-        _copy_file(from_path, download_dir, link)
-
-
-def _copy_dist_from_dir(link_path, location):
-    """Copy distribution files in `link_path` to `location`.
-
-    Invoked when user requests to install a local directory.
E.g.: - - pip install . - pip install ~/dev/git-repos/python-prompt-toolkit - - """ - - # Note: This is currently VERY SLOW if you have a lot of data in the - # directory, because it copies everything with `shutil.copytree`. - # What it should really do is build an sdist and install that. - # See https://github.com/pypa/pip/issues/2195 - - if os.path.isdir(location): - rmtree(location) - - # build an sdist - setup_py = 'setup.py' - sdist_args = [sys.executable] - sdist_args.append('-c') - sdist_args.append(SETUPTOOLS_SHIM % setup_py) - sdist_args.append('sdist') - sdist_args += ['--dist-dir', location] - logger.info('Running setup.py sdist for %s', link_path) - - with indent_log(): - call_subprocess(sdist_args, cwd=link_path, show_stdout=False) - - # unpack sdist into `location` - sdist = os.path.join(location, os.listdir(location)[0]) - logger.info('Unpacking sdist %s into %s', sdist, location) - unpack_file(sdist, location, content_type=None, link=None) - - -class PipXmlrpcTransport(xmlrpc_client.Transport): - """Provide a `xmlrpclib.Transport` implementation via a `PipSession` - object. - """ - - def __init__(self, index_url, session, use_datetime=False): - xmlrpc_client.Transport.__init__(self, use_datetime) - index_parts = urllib_parse.urlparse(index_url) - self._scheme = index_parts.scheme - self._session = session - - def request(self, host, handler, request_body, verbose=False): - parts = (self._scheme, host, handler, None, None, None) - url = urllib_parse.urlunparse(parts) - try: - headers = {'Content-Type': 'text/xml'} - response = self._session.post(url, data=request_body, - headers=headers, stream=True) - response.raise_for_status() - self.verbose = verbose - return self.parse_response(response.raw) - except requests.HTTPError as exc: - logger.critical( - "HTTP error %s while getting %s", - exc.response.status_code, url, - ) - raise - - -def unpack_url(link, location, download_dir=None, - only_download=False, session=None, hashes=None): - """Unpack link. - If link is a VCS link: - if only_download, export into download_dir and ignore location - else unpack into location - for other types of link: - - unpack into location - - if download_dir, copy the file into download_dir - - if only_download, mark location for deletion - - :param hashes: A Hashes object, one of whose embedded hashes must match, - or HashMismatch will be raised. If the Hashes is empty, no matches are - required, and unhashable types of requirements (like VCS ones, which - would ordinarily raise HashUnsupported) are allowed. - """ - # non-editable vcs urls - if is_vcs_url(link): - unpack_vcs_link(link, location) - - # file urls - elif is_file_url(link): - unpack_file_url(link, location, download_dir, hashes=hashes) - - # http urls - else: - if session is None: - session = PipSession() - - unpack_http_url( - link, - location, - download_dir, - session, - hashes=hashes - ) - if only_download: - write_delete_marker_file(location) - - -def _download_http_url(link, session, temp_dir, hashes): - """Download link url into temp_dir using provided session""" - target_url = link.url.split('#', 1)[0] - try: - resp = session.get( - target_url, - # We use Accept-Encoding: identity here because requests - # defaults to accepting compressed responses. This breaks in - # a variety of ways depending on how the server is configured. 
- # - Some servers will notice that the file isn't a compressible - # file and will leave the file alone and with an empty - # Content-Encoding - # - Some servers will notice that the file is already - # compressed and will leave the file alone and will add a - # Content-Encoding: gzip header - # - Some servers won't notice anything at all and will take - # a file that's already been compressed and compress it again - # and set the Content-Encoding: gzip header - # By setting this to request only the identity encoding We're - # hoping to eliminate the third case. Hopefully there does not - # exist a server which when given a file will notice it is - # already compressed and that you're not asking for a - # compressed file and will then decompress it before sending - # because if that's the case I don't think it'll ever be - # possible to make this work. - headers={"Accept-Encoding": "identity"}, - stream=True, - ) - resp.raise_for_status() - except requests.HTTPError as exc: - logger.critical( - "HTTP error %s while getting %s", exc.response.status_code, link, - ) - raise - - content_type = resp.headers.get('content-type', '') - filename = link.filename # fallback - # Have a look at the Content-Disposition header for a better guess - content_disposition = resp.headers.get('content-disposition') - if content_disposition: - type, params = cgi.parse_header(content_disposition) - # We use ``or`` here because we don't want to use an "empty" value - # from the filename param. - filename = params.get('filename') or filename - ext = splitext(filename)[1] - if not ext: - ext = mimetypes.guess_extension(content_type) - if ext: - filename += ext - if not ext and link.url != resp.url: - ext = os.path.splitext(resp.url)[1] - if ext: - filename += ext - file_path = os.path.join(temp_dir, filename) - with open(file_path, 'wb') as content_file: - _download_url(resp, link, content_file, hashes) - return file_path, content_type - - -def _check_download_dir(link, download_dir, hashes): - """ Check download_dir for previously downloaded file with correct hash - If a correct file is found return its path else None - """ - download_path = os.path.join(download_dir, link.filename) - if os.path.exists(download_path): - # If already downloaded, does its hash match? - logger.info('File was already downloaded %s', download_path) - if hashes: - try: - hashes.check_against_path(download_path) - except HashMismatch: - logger.warning( - 'Previously-downloaded file %s has bad hash. 
' - 'Re-downloading.', - download_path - ) - os.unlink(download_path) - return None - return download_path - return None diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/exceptions.py b/classifier/myenv/lib/python3.6/site-packages/pip/exceptions.py deleted file mode 100644 index 50b527f..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/exceptions.py +++ /dev/null @@ -1,244 +0,0 @@ -"""Exceptions used throughout package""" -from __future__ import absolute_import - -from itertools import chain, groupby, repeat - -from pip._vendor.six import iteritems - - -class PipError(Exception): - """Base pip exception""" - - -class InstallationError(PipError): - """General exception during installation""" - - -class UninstallationError(PipError): - """General exception during uninstallation""" - - -class DistributionNotFound(InstallationError): - """Raised when a distribution cannot be found to satisfy a requirement""" - - -class RequirementsFileParseError(InstallationError): - """Raised when a general error occurs parsing a requirements file line.""" - - -class BestVersionAlreadyInstalled(PipError): - """Raised when the most up-to-date version of a package is already - installed.""" - - -class BadCommand(PipError): - """Raised when virtualenv or a command is not found""" - - -class CommandError(PipError): - """Raised when there is an error in command-line arguments""" - - -class PreviousBuildDirError(PipError): - """Raised when there's a previous conflicting build directory""" - - -class InvalidWheelFilename(InstallationError): - """Invalid wheel filename.""" - - -class UnsupportedWheel(InstallationError): - """Unsupported wheel.""" - - -class HashErrors(InstallationError): - """Multiple HashError instances rolled into one for reporting""" - - def __init__(self): - self.errors = [] - - def append(self, error): - self.errors.append(error) - - def __str__(self): - lines = [] - self.errors.sort(key=lambda e: e.order) - for cls, errors_of_cls in groupby(self.errors, lambda e: e.__class__): - lines.append(cls.head) - lines.extend(e.body() for e in errors_of_cls) - if lines: - return '\n'.join(lines) - - def __nonzero__(self): - return bool(self.errors) - - def __bool__(self): - return self.__nonzero__() - - -class HashError(InstallationError): - """ - A failure to verify a package against known-good hashes - - :cvar order: An int sorting hash exception classes by difficulty of - recovery (lower being harder), so the user doesn't bother fretting - about unpinned packages when he has deeper issues, like VCS - dependencies, to deal with. Also keeps error reports in a - deterministic order. - :cvar head: A section heading for display above potentially many - exceptions of this kind - :ivar req: The InstallRequirement that triggered this error. This is - pasted on after the exception is instantiated, because it's not - typically available earlier. - - """ - req = None - head = '' - - def body(self): - """Return a summary of me for display under the heading. - - This default implementation simply prints a description of the - triggering requirement. - - :param req: The InstallRequirement that provoked this error, with - populate_link() having already been called - - """ - return ' %s' % self._requirement_name() - - def __str__(self): - return '%s\n%s' % (self.head, self.body()) - - def _requirement_name(self): - """Return a description of the requirement that triggered me. 
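
The hash checks that raise these errors live in pip.utils.hashes (not part of this diff); at their core they reduce to a streamed hexdigest comparison, roughly like this standalone sketch:

    import hashlib

    def matches_expected_hash(path, algorithm, expected_hexdigest, chunk_size=8192):
        # Stream the file so large archives never need to fit in memory.
        digest = hashlib.new(algorithm)  # e.g. algorithm='sha256'
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(chunk_size), b''):
                digest.update(chunk)
        return digest.hexdigest() == expected_hexdigest

A failed comparison is what surfaces as the HashMismatch error defined further down.
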
- - This default implementation returns long description of the req, with - line numbers - - """ - return str(self.req) if self.req else 'unknown package' - - -class VcsHashUnsupported(HashError): - """A hash was provided for a version-control-system-based requirement, but - we don't have a method for hashing those.""" - - order = 0 - head = ("Can't verify hashes for these requirements because we don't " - "have a way to hash version control repositories:") - - -class DirectoryUrlHashUnsupported(HashError): - """A hash was provided for a version-control-system-based requirement, but - we don't have a method for hashing those.""" - - order = 1 - head = ("Can't verify hashes for these file:// requirements because they " - "point to directories:") - - -class HashMissing(HashError): - """A hash was needed for a requirement but is absent.""" - - order = 2 - head = ('Hashes are required in --require-hashes mode, but they are ' - 'missing from some requirements. Here is a list of those ' - 'requirements along with the hashes their downloaded archives ' - 'actually had. Add lines like these to your requirements files to ' - 'prevent tampering. (If you did not enable --require-hashes ' - 'manually, note that it turns on automatically when any package ' - 'has a hash.)') - - def __init__(self, gotten_hash): - """ - :param gotten_hash: The hash of the (possibly malicious) archive we - just downloaded - """ - self.gotten_hash = gotten_hash - - def body(self): - from pip.utils.hashes import FAVORITE_HASH # Dodge circular import. - - package = None - if self.req: - # In the case of URL-based requirements, display the original URL - # seen in the requirements file rather than the package name, - # so the output can be directly copied into the requirements file. - package = (self.req.original_link if self.req.original_link - # In case someone feeds something downright stupid - # to InstallRequirement's constructor. - else getattr(self.req, 'req', None)) - return ' %s --hash=%s:%s' % (package or 'unknown package', - FAVORITE_HASH, - self.gotten_hash) - - -class HashUnpinned(HashError): - """A requirement had a hash specified but was not pinned to a specific - version.""" - - order = 3 - head = ('In --require-hashes mode, all requirements must have their ' - 'versions pinned with ==. These do not:') - - -class HashMismatch(HashError): - """ - Distribution file hash values don't match. - - :ivar package_name: The name of the package that triggered the hash - mismatch. Feel free to write to this after the exception is raise to - improve its error message. - - """ - order = 4 - head = ('THESE PACKAGES DO NOT MATCH THE HASHES FROM THE REQUIREMENTS ' - 'FILE. If you have updated the package versions, please update ' - 'the hashes. Otherwise, examine the package contents carefully; ' - 'someone may have tampered with them.') - - def __init__(self, allowed, gots): - """ - :param allowed: A dict of algorithm names pointing to lists of allowed - hex digests - :param gots: A dict of algorithm names pointing to hashes we - actually got from the files under suspicion - """ - self.allowed = allowed - self.gots = gots - - def body(self): - return ' %s:\n%s' % (self._requirement_name(), - self._hash_comparison()) - - def _hash_comparison(self): - """ - Return a comparison of actual and expected hash values. 
- - Example:: - - Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde - or 123451234512345123451234512345123451234512345 - Got bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef - - """ - def hash_then_or(hash_name): - # For now, all the decent hashes have 6-char names, so we can get - # away with hard-coding space literals. - return chain([hash_name], repeat(' or')) - - lines = [] - for hash_name, expecteds in iteritems(self.allowed): - prefix = hash_then_or(hash_name) - lines.extend((' Expected %s %s' % (next(prefix), e)) - for e in expecteds) - lines.append(' Got %s\n' % - self.gots[hash_name].hexdigest()) - prefix = ' or' - return '\n'.join(lines) - - -class UnsupportedPythonVersion(InstallationError): - """Unsupported python version according to Requires-Python package - metadata.""" diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/index.py b/classifier/myenv/lib/python3.6/site-packages/pip/index.py deleted file mode 100644 index f653f6e..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/index.py +++ /dev/null @@ -1,1102 +0,0 @@ -"""Routines related to PyPI, indexes""" -from __future__ import absolute_import - -import logging -import cgi -from collections import namedtuple -import itertools -import sys -import os -import re -import mimetypes -import posixpath -import warnings - -from pip._vendor.six.moves.urllib import parse as urllib_parse -from pip._vendor.six.moves.urllib import request as urllib_request - -from pip.compat import ipaddress -from pip.utils import ( - cached_property, splitext, normalize_path, - ARCHIVE_EXTENSIONS, SUPPORTED_EXTENSIONS, -) -from pip.utils.deprecation import RemovedInPip10Warning -from pip.utils.logging import indent_log -from pip.utils.packaging import check_requires_python -from pip.exceptions import ( - DistributionNotFound, BestVersionAlreadyInstalled, InvalidWheelFilename, - UnsupportedWheel, -) -from pip.download import HAS_TLS, is_url, path_to_url, url_to_path -from pip.wheel import Wheel, wheel_ext -from pip.pep425tags import get_supported -from pip._vendor import html5lib, requests, six -from pip._vendor.packaging.version import parse as parse_version -from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.packaging import specifiers -from pip._vendor.requests.exceptions import SSLError -from pip._vendor.distlib.compat import unescape - - -__all__ = ['FormatControl', 'fmt_ctl_handle_mutual_exclude', 'PackageFinder'] - - -SECURE_ORIGINS = [ - # protocol, hostname, port - # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC) - ("https", "*", "*"), - ("*", "localhost", "*"), - ("*", "127.0.0.0/8", "*"), - ("*", "::1/128", "*"), - ("file", "*", None), - # ssh is always secure. 
-    ("ssh", "*", "*"),
-]
-
-
-logger = logging.getLogger(__name__)
-
-
-class InstallationCandidate(object):
-
-    def __init__(self, project, version, location):
-        self.project = project
-        self.version = parse_version(version)
-        self.location = location
-        self._key = (self.project, self.version, self.location)
-
-    def __repr__(self):
-        return "<InstallationCandidate({!r}, {!r}, {!r})>".format(
-            self.project, self.version, self.location,
-        )
-
-    def __hash__(self):
-        return hash(self._key)
-
-    def __lt__(self, other):
-        return self._compare(other, lambda s, o: s < o)
-
-    def __le__(self, other):
-        return self._compare(other, lambda s, o: s <= o)
-
-    def __eq__(self, other):
-        return self._compare(other, lambda s, o: s == o)
-
-    def __ge__(self, other):
-        return self._compare(other, lambda s, o: s >= o)
-
-    def __gt__(self, other):
-        return self._compare(other, lambda s, o: s > o)
-
-    def __ne__(self, other):
-        return self._compare(other, lambda s, o: s != o)
-
-    def _compare(self, other, method):
-        if not isinstance(other, InstallationCandidate):
-            return NotImplemented
-
-        return method(self._key, other._key)
-
-
-class PackageFinder(object):
-    """This finds packages.
-
-    This is meant to match easy_install's technique for looking for
-    packages, by reading pages and looking for appropriate links.
-    """
-
-    def __init__(self, find_links, index_urls, allow_all_prereleases=False,
-                 trusted_hosts=None, process_dependency_links=False,
-                 session=None, format_control=None, platform=None,
-                 versions=None, abi=None, implementation=None):
-        """Create a PackageFinder.
-
-        :param format_control: A FormatControl object or None. Used to control
-            the selection of source packages / binary packages when consulting
-            the index and links.
-        :param platform: A string or None. If None, searches for packages
-            that are supported by the current system. Otherwise, will find
-            packages that can be built on the platform passed in. These
-            packages will only be downloaded for distribution: they will
-            not be built locally.
-        :param versions: A list of strings or None. This is passed directly
-            to pep425tags.py in the get_supported() method.
-        :param abi: A string or None. This is passed directly
-            to pep425tags.py in the get_supported() method.
-        :param implementation: A string or None. This is passed directly
-            to pep425tags.py in the get_supported() method.
-        """
-        if session is None:
-            raise TypeError(
-                "PackageFinder() missing 1 required keyword argument: "
-                "'session'"
-            )
-
-        # Build find_links. If an argument starts with ~, it may be
-        # a local file relative to a home directory. So try normalizing
-        # it and if it exists, use the normalized version.
-        # This is deliberately conservative - it might be fine just to
-        # blindly normalize anything starting with a ~...
-        self.find_links = []
-        for link in find_links:
-            if link.startswith('~'):
-                new_link = normalize_path(link)
-                if os.path.exists(new_link):
-                    link = new_link
-            self.find_links.append(link)
-
-        self.index_urls = index_urls
-        self.dependency_links = []
-
-        # These are boring links that have already been logged somehow:
-        self.logged_links = set()
-
-        self.format_control = format_control or FormatControl(set(), set())
-
-        # Domains that we won't emit warnings for when not using HTTPS
-        self.secure_origins = [
-            ("*", host, "*")
-            for host in (trusted_hosts if trusted_hosts else [])
-        ]
-
-        # Do we want to allow _all_ pre-releases?
-        self.allow_all_prereleases = allow_all_prereleases
-
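Since _key leads with the parsed version, InstallationCandidate above orders by PEP 440 semantics rather than by string order — a sketch with hypothetical URLs (pip itself passes Link objects as the location):

    a = InstallationCandidate('requests', '2.9.1', 'https://example.org/requests-2.9.1.tar.gz')
    b = InstallationCandidate('requests', '2.12.0', 'https://example.org/requests-2.12.0.tar.gz')
    assert a < b              # parse_version('2.9.1') < parse_version('2.12.0')
    assert max([a, b]) is b   # plain string comparison would wrongly pick '2.9.1'

-        # Do we process dependency links?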
- self.process_dependency_links = process_dependency_links - - # The Session we'll use to make requests - self.session = session - - # The valid tags to check potential found wheel candidates against - self.valid_tags = get_supported( - versions=versions, - platform=platform, - abi=abi, - impl=implementation, - ) - - # If we don't have TLS enabled, then WARN if anyplace we're looking - # relies on TLS. - if not HAS_TLS: - for link in itertools.chain(self.index_urls, self.find_links): - parsed = urllib_parse.urlparse(link) - if parsed.scheme == "https": - logger.warning( - "pip is configured with locations that require " - "TLS/SSL, however the ssl module in Python is not " - "available." - ) - break - - def add_dependency_links(self, links): - # # FIXME: this shouldn't be global list this, it should only - # # apply to requirements of the package that specifies the - # # dependency_links value - # # FIXME: also, we should track comes_from (i.e., use Link) - if self.process_dependency_links: - warnings.warn( - "Dependency Links processing has been deprecated and will be " - "removed in a future release.", - RemovedInPip10Warning, - ) - self.dependency_links.extend(links) - - @staticmethod - def _sort_locations(locations, expand_dir=False): - """ - Sort locations into "files" (archives) and "urls", and return - a pair of lists (files,urls) - """ - files = [] - urls = [] - - # puts the url for the given file path into the appropriate list - def sort_path(path): - url = path_to_url(path) - if mimetypes.guess_type(url, strict=False)[0] == 'text/html': - urls.append(url) - else: - files.append(url) - - for url in locations: - - is_local_path = os.path.exists(url) - is_file_url = url.startswith('file:') - - if is_local_path or is_file_url: - if is_local_path: - path = url - else: - path = url_to_path(url) - if os.path.isdir(path): - if expand_dir: - path = os.path.realpath(path) - for item in os.listdir(path): - sort_path(os.path.join(path, item)) - elif is_file_url: - urls.append(url) - elif os.path.isfile(path): - sort_path(path) - else: - logger.warning( - "Url '%s' is ignored: it is neither a file " - "nor a directory.", url) - elif is_url(url): - # Only add url with clear scheme - urls.append(url) - else: - logger.warning( - "Url '%s' is ignored. It is either a non-existing " - "path or lacks a specific scheme.", url) - - return files, urls - - def _candidate_sort_key(self, candidate): - """ - Function used to generate link sort key for link tuples. - The greater the return value, the more preferred it is. - If not finding wheels, then sorted by version only. - If finding wheels, then the sort order is by version, then: - 1. existing installs - 2. wheels ordered via Wheel.support_index_min(self.valid_tags) - 3. source archives - Note: it was considered to embed this logic into the Link - comparison operators, but then different sdist links - with the same version, would have to be considered equal - """ - support_num = len(self.valid_tags) - if candidate.location.is_wheel: - # can raise InvalidWheelFilename - wheel = Wheel(candidate.location.filename) - if not wheel.supported(self.valid_tags): - raise UnsupportedWheel( - "%s is not a supported wheel for this platform. It " - "can't be sorted." 
% wheel.filename - ) - pri = -(wheel.support_index_min(self.valid_tags)) - else: # sdist - pri = -(support_num) - return (candidate.version, pri) - - def _validate_secure_origin(self, logger, location): - # Determine if this url used a secure transport mechanism - parsed = urllib_parse.urlparse(str(location)) - origin = (parsed.scheme, parsed.hostname, parsed.port) - - # The protocol to use to see if the protocol matches. - # Don't count the repository type as part of the protocol: in - # cases such as "git+ssh", only use "ssh". (I.e., Only verify against - # the last scheme.) - protocol = origin[0].rsplit('+', 1)[-1] - - # Determine if our origin is a secure origin by looking through our - # hardcoded list of secure origins, as well as any additional ones - # configured on this PackageFinder instance. - for secure_origin in (SECURE_ORIGINS + self.secure_origins): - if protocol != secure_origin[0] and secure_origin[0] != "*": - continue - - try: - # We need to do this decode dance to ensure that we have a - # unicode object, even on Python 2.x. - addr = ipaddress.ip_address( - origin[1] - if ( - isinstance(origin[1], six.text_type) or - origin[1] is None - ) - else origin[1].decode("utf8") - ) - network = ipaddress.ip_network( - secure_origin[1] - if isinstance(secure_origin[1], six.text_type) - else secure_origin[1].decode("utf8") - ) - except ValueError: - # We don't have both a valid address or a valid network, so - # we'll check this origin against hostnames. - if (origin[1] and - origin[1].lower() != secure_origin[1].lower() and - secure_origin[1] != "*"): - continue - else: - # We have a valid address and network, so see if the address - # is contained within the network. - if addr not in network: - continue - - # Check to see if the port patches - if (origin[2] != secure_origin[2] and - secure_origin[2] != "*" and - secure_origin[2] is not None): - continue - - # If we've gotten here, then this origin matches the current - # secure origin and we should return True - return True - - # If we've gotten to this point, then the origin isn't secure and we - # will not accept it as a valid location to search. We will however - # log a warning that we are ignoring it. - logger.warning( - "The repository located at %s is not a trusted or secure host and " - "is being ignored. If this repository is available via HTTPS it " - "is recommended to use HTTPS instead, otherwise you may silence " - "this warning and allow it anyways with '--trusted-host %s'.", - parsed.hostname, - parsed.hostname, - ) - - return False - - def _get_index_urls_locations(self, project_name): - """Returns the locations found via self.index_urls - - Checks the url_name on the main (first in the list) index and - use this url_name to produce all locations - """ - - def mkurl_pypi_url(url): - loc = posixpath.join( - url, - urllib_parse.quote(canonicalize_name(project_name))) - # For maximum compatibility with easy_install, ensure the path - # ends in a trailing slash. Although this isn't in the spec - # (and PyPI can handle it without the slash) some other index - # implementations might break if they relied on easy_install's - # behavior. - if not loc.endswith('/'): - loc = loc + '/' - return loc - - return [mkurl_pypi_url(url) for url in self.index_urls] - - def find_all_candidates(self, project_name): - """Find all available InstallationCandidate for project_name - - This checks index_urls, find_links and dependency_links. - All versions found are returned as an InstallationCandidate list. 
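
The network branch of _validate_secure_origin above rests on ipaddress containment (pip imports a compat shim on Python 2; on Python 3 this is the stdlib module):

    import ipaddress

    addr = ipaddress.ip_address(u'127.0.0.1')
    net = ipaddress.ip_network(u'127.0.0.0/8')
    assert addr in net  # why the ('*', '127.0.0.0/8', '*') entry treats loopback as secure
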
- - See _link_package_versions for details on which files are accepted - """ - index_locations = self._get_index_urls_locations(project_name) - index_file_loc, index_url_loc = self._sort_locations(index_locations) - fl_file_loc, fl_url_loc = self._sort_locations( - self.find_links, expand_dir=True) - dep_file_loc, dep_url_loc = self._sort_locations(self.dependency_links) - - file_locations = ( - Link(url) for url in itertools.chain( - index_file_loc, fl_file_loc, dep_file_loc) - ) - - # We trust every url that the user has given us whether it was given - # via --index-url or --find-links - # We explicitly do not trust links that came from dependency_links - # We want to filter out any thing which does not have a secure origin. - url_locations = [ - link for link in itertools.chain( - (Link(url) for url in index_url_loc), - (Link(url) for url in fl_url_loc), - (Link(url) for url in dep_url_loc), - ) - if self._validate_secure_origin(logger, link) - ] - - logger.debug('%d location(s) to search for versions of %s:', - len(url_locations), project_name) - - for location in url_locations: - logger.debug('* %s', location) - - canonical_name = canonicalize_name(project_name) - formats = fmt_ctl_formats(self.format_control, canonical_name) - search = Search(project_name, canonical_name, formats) - find_links_versions = self._package_versions( - # We trust every directly linked archive in find_links - (Link(url, '-f') for url in self.find_links), - search - ) - - page_versions = [] - for page in self._get_pages(url_locations, project_name): - logger.debug('Analyzing links from page %s', page.url) - with indent_log(): - page_versions.extend( - self._package_versions(page.links, search) - ) - - dependency_versions = self._package_versions( - (Link(url) for url in self.dependency_links), search - ) - if dependency_versions: - logger.debug( - 'dependency_links found: %s', - ', '.join([ - version.location.url for version in dependency_versions - ]) - ) - - file_versions = self._package_versions(file_locations, search) - if file_versions: - file_versions.sort(reverse=True) - logger.debug( - 'Local files found: %s', - ', '.join([ - url_to_path(candidate.location.url) - for candidate in file_versions - ]) - ) - - # This is an intentional priority ordering - return ( - file_versions + find_links_versions + page_versions + - dependency_versions - ) - - def find_requirement(self, req, upgrade): - """Try to find a Link matching req - - Expects req, an InstallRequirement and upgrade, a boolean - Returns a Link if found, - Raises DistributionNotFound or BestVersionAlreadyInstalled otherwise - """ - all_candidates = self.find_all_candidates(req.name) - - # Filter out anything which doesn't match our specifier - compatible_versions = set( - req.specifier.filter( - # We turn the version object into a str here because otherwise - # when we're debundled but setuptools isn't, Python will see - # packaging.version.Version and - # pkg_resources._vendor.packaging.version.Version as different - # types. This way we'll use a str as a common data interchange - # format. If we stop using the pkg_resources provided specifier - # and start using our own, we can drop the cast to str(). - [str(c.version) for c in all_candidates], - prereleases=( - self.allow_all_prereleases - if self.allow_all_prereleases else None - ), - ) - ) - applicable_candidates = [ - # Again, converting to str to deal with debundling. 
- c for c in all_candidates if str(c.version) in compatible_versions - ] - - if applicable_candidates: - best_candidate = max(applicable_candidates, - key=self._candidate_sort_key) - else: - best_candidate = None - - if req.satisfied_by is not None: - installed_version = parse_version(req.satisfied_by.version) - else: - installed_version = None - - if installed_version is None and best_candidate is None: - logger.critical( - 'Could not find a version that satisfies the requirement %s ' - '(from versions: %s)', - req, - ', '.join( - sorted( - set(str(c.version) for c in all_candidates), - key=parse_version, - ) - ) - ) - - raise DistributionNotFound( - 'No matching distribution found for %s' % req - ) - - best_installed = False - if installed_version and ( - best_candidate is None or - best_candidate.version <= installed_version): - best_installed = True - - if not upgrade and installed_version is not None: - if best_installed: - logger.debug( - 'Existing installed version (%s) is most up-to-date and ' - 'satisfies requirement', - installed_version, - ) - else: - logger.debug( - 'Existing installed version (%s) satisfies requirement ' - '(most up-to-date version is %s)', - installed_version, - best_candidate.version, - ) - return None - - if best_installed: - # We have an existing version, and its the best version - logger.debug( - 'Installed version (%s) is most up-to-date (past versions: ' - '%s)', - installed_version, - ', '.join(sorted(compatible_versions, key=parse_version)) or - "none", - ) - raise BestVersionAlreadyInstalled - - logger.debug( - 'Using version %s (newest of versions: %s)', - best_candidate.version, - ', '.join(sorted(compatible_versions, key=parse_version)) - ) - return best_candidate.location - - def _get_pages(self, locations, project_name): - """ - Yields (page, page_url) from the given locations, skipping - locations that have errors. 
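
The version filtering in find_requirement above is packaging's SpecifierSet.filter applied to stringified candidate versions — a sketch:

    from pip._vendor.packaging.specifiers import SpecifierSet

    spec = SpecifierSet('>=2.0,<3.0')
    assert list(spec.filter(['1.5', '2.4', '3.1'])) == ['2.4']

The prereleases argument seen above mirrors allow_all_prereleases on the finder.
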
- """ - seen = set() - for location in locations: - if location in seen: - continue - seen.add(location) - - page = self._get_page(location) - if page is None: - continue - - yield page - - _py_version_re = re.compile(r'-py([123]\.?[0-9]?)$') - - def _sort_links(self, links): - """ - Returns elements of links in order, non-egg links first, egg links - second, while eliminating duplicates - """ - eggs, no_eggs = [], [] - seen = set() - for link in links: - if link not in seen: - seen.add(link) - if link.egg_fragment: - eggs.append(link) - else: - no_eggs.append(link) - return no_eggs + eggs - - def _package_versions(self, links, search): - result = [] - for link in self._sort_links(links): - v = self._link_package_versions(link, search) - if v is not None: - result.append(v) - return result - - def _log_skipped_link(self, link, reason): - if link not in self.logged_links: - logger.debug('Skipping link %s; %s', link, reason) - self.logged_links.add(link) - - def _link_package_versions(self, link, search): - """Return an InstallationCandidate or None""" - version = None - if link.egg_fragment: - egg_info = link.egg_fragment - ext = link.ext - else: - egg_info, ext = link.splitext() - if not ext: - self._log_skipped_link(link, 'not a file') - return - if ext not in SUPPORTED_EXTENSIONS: - self._log_skipped_link( - link, 'unsupported archive format: %s' % ext) - return - if "binary" not in search.formats and ext == wheel_ext: - self._log_skipped_link( - link, 'No binaries permitted for %s' % search.supplied) - return - if "macosx10" in link.path and ext == '.zip': - self._log_skipped_link(link, 'macosx10 one') - return - if ext == wheel_ext: - try: - wheel = Wheel(link.filename) - except InvalidWheelFilename: - self._log_skipped_link(link, 'invalid wheel filename') - return - if canonicalize_name(wheel.name) != search.canonical: - self._log_skipped_link( - link, 'wrong project name (not %s)' % search.supplied) - return - - if not wheel.supported(self.valid_tags): - self._log_skipped_link( - link, 'it is not compatible with this Python') - return - - version = wheel.version - - # This should be up by the search.ok_binary check, but see issue 2700. - if "source" not in search.formats and ext != wheel_ext: - self._log_skipped_link( - link, 'No sources permitted for %s' % search.supplied) - return - - if not version: - version = egg_info_matches(egg_info, search.supplied, link) - if version is None: - self._log_skipped_link( - link, 'wrong project name (not %s)' % search.supplied) - return - - match = self._py_version_re.search(version) - if match: - version = version[:match.start()] - py_version = match.group(1) - if py_version != sys.version[:3]: - self._log_skipped_link( - link, 'Python version is incorrect') - return - try: - support_this_python = check_requires_python(link.requires_python) - except specifiers.InvalidSpecifier: - logger.debug("Package %s has an invalid Requires-Python entry: %s", - link.filename, link.requires_python) - support_this_python = True - - if not support_this_python: - logger.debug("The package %s is incompatible with the python" - "version in use. 
Acceptable python versions are:%s", - link, link.requires_python) - return - logger.debug('Found link %s, version: %s', link, version) - - return InstallationCandidate(search.supplied, version, link) - - def _get_page(self, link): - return HTMLPage.get_page(link, session=self.session) - - -def egg_info_matches( - egg_info, search_name, link, - _egg_info_re=re.compile(r'([a-z0-9_.]+)-([a-z0-9_.!+-]+)', re.I)): - """Pull the version part out of a string. - - :param egg_info: The string to parse. E.g. foo-2.1 - :param search_name: The name of the package this belongs to. None to - infer the name. Note that this cannot unambiguously parse strings - like foo-2-2 which might be foo, 2-2 or foo-2, 2. - :param link: The link the string came from, for logging on failure. - """ - match = _egg_info_re.search(egg_info) - if not match: - logger.debug('Could not parse version from link: %s', link) - return None - if search_name is None: - full_match = match.group(0) - return full_match[full_match.index('-'):] - name = match.group(0).lower() - # To match the "safe" name that pkg_resources creates: - name = name.replace('_', '-') - # project name and version must be separated by a dash - look_for = search_name.lower() + "-" - if name.startswith(look_for): - return match.group(0)[len(look_for):] - else: - return None - - -class HTMLPage(object): - """Represents one page, along with its URL""" - - def __init__(self, content, url, headers=None): - # Determine if we have any encoding information in our headers - encoding = None - if headers and "Content-Type" in headers: - content_type, params = cgi.parse_header(headers["Content-Type"]) - - if "charset" in params: - encoding = params['charset'] - - self.content = content - self.parsed = html5lib.parse( - self.content, - transport_encoding=encoding, - namespaceHTMLElements=False, - ) - self.url = url - self.headers = headers - - def __str__(self): - return self.url - - @classmethod - def get_page(cls, link, skip_archives=True, session=None): - if session is None: - raise TypeError( - "get_page() missing 1 required keyword argument: 'session'" - ) - - url = link.url - url = url.split('#', 1)[0] - - # Check for VCS schemes that do not support lookup as web pages. - from pip.vcs import VcsSupport - for scheme in VcsSupport.schemes: - if url.lower().startswith(scheme) and url[len(scheme)] in '+:': - logger.debug('Cannot look at %s URL %s', scheme, link) - return None - - try: - if skip_archives: - filename = link.filename - for bad_ext in ARCHIVE_EXTENSIONS: - if filename.endswith(bad_ext): - content_type = cls._get_content_type( - url, session=session, - ) - if content_type.lower().startswith('text/html'): - break - else: - logger.debug( - 'Skipping page %s because of Content-Type: %s', - link, - content_type, - ) - return - - logger.debug('Getting page %s', url) - - # Tack index.html onto file:// URLs that point to directories - (scheme, netloc, path, params, query, fragment) = \ - urllib_parse.urlparse(url) - if (scheme == 'file' and - os.path.isdir(urllib_request.url2pathname(path))): - # add trailing slash if not present so urljoin doesn't trim - # final segment - if not url.endswith('/'): - url += '/' - url = urllib_parse.urljoin(url, 'index.html') - logger.debug(' file: URL is directory, getting %s', url) - - resp = session.get( - url, - headers={ - "Accept": "text/html", - "Cache-Control": "max-age=600", - }, - ) - resp.raise_for_status() - - # The check for archives above only works if the url ends with - # something that looks like an archive. 
However that is not a - # requirement of an url. Unless we issue a HEAD request on every - # url we cannot know ahead of time for sure if something is HTML - # or not. However we can check after we've downloaded it. - content_type = resp.headers.get('Content-Type', 'unknown') - if not content_type.lower().startswith("text/html"): - logger.debug( - 'Skipping page %s because of Content-Type: %s', - link, - content_type, - ) - return - - inst = cls(resp.content, resp.url, resp.headers) - except requests.HTTPError as exc: - cls._handle_fail(link, exc, url) - except SSLError as exc: - reason = ("There was a problem confirming the ssl certificate: " - "%s" % exc) - cls._handle_fail(link, reason, url, meth=logger.info) - except requests.ConnectionError as exc: - cls._handle_fail(link, "connection error: %s" % exc, url) - except requests.Timeout: - cls._handle_fail(link, "timed out", url) - else: - return inst - - @staticmethod - def _handle_fail(link, reason, url, meth=None): - if meth is None: - meth = logger.debug - - meth("Could not fetch URL %s: %s - skipping", link, reason) - - @staticmethod - def _get_content_type(url, session): - """Get the Content-Type of the given url, using a HEAD request""" - scheme, netloc, path, query, fragment = urllib_parse.urlsplit(url) - if scheme not in ('http', 'https'): - # FIXME: some warning or something? - # assertion error? - return '' - - resp = session.head(url, allow_redirects=True) - resp.raise_for_status() - - return resp.headers.get("Content-Type", "") - - @cached_property - def base_url(self): - bases = [ - x for x in self.parsed.findall(".//base") - if x.get("href") is not None - ] - if bases and bases[0].get("href"): - return bases[0].get("href") - else: - return self.url - - @property - def links(self): - """Yields all links in the page""" - for anchor in self.parsed.findall(".//a"): - if anchor.get("href"): - href = anchor.get("href") - url = self.clean_link( - urllib_parse.urljoin(self.base_url, href) - ) - pyrequire = anchor.get('data-requires-python') - pyrequire = unescape(pyrequire) if pyrequire else None - yield Link(url, self, requires_python=pyrequire) - - _clean_re = re.compile(r'[^a-z0-9$&+,/:;=?@.#%_\\|-]', re.I) - - def clean_link(self, url): - """Makes sure a link is fully encoded. That is, if a ' ' shows up in - the link, it will be rewritten to %20 (while not over-quoting - % or other characters).""" - return self._clean_re.sub( - lambda match: '%%%2x' % ord(match.group(0)), url) - - -class Link(object): - - def __init__(self, url, comes_from=None, requires_python=None): - """ - Object representing a parsed link from https://pypi.python.org/simple/* - - url: - url of the resource pointed to (href of the link) - comes_from: - instance of HTMLPage where the link was found, or string. - requires_python: - String containing the `Requires-Python` metadata field, specified - in PEP 345. This may be specified by a data-requires-python - attribute in the HTML link tag, as described in PEP 503. 
-        """
-
-        # url can be a UNC windows share
-        if url.startswith('\\\\'):
-            url = path_to_url(url)
-
-        self.url = url
-        self.comes_from = comes_from
-        self.requires_python = requires_python if requires_python else None
-
-    def __str__(self):
-        if self.requires_python:
-            rp = ' (requires-python:%s)' % self.requires_python
-        else:
-            rp = ''
-        if self.comes_from:
-            return '%s (from %s)%s' % (self.url, self.comes_from, rp)
-        else:
-            return str(self.url)
-
-    def __repr__(self):
-        return '<Link %s>' % self
-
-    def __eq__(self, other):
-        if not isinstance(other, Link):
-            return NotImplemented
-        return self.url == other.url
-
-    def __ne__(self, other):
-        if not isinstance(other, Link):
-            return NotImplemented
-        return self.url != other.url
-
-    def __lt__(self, other):
-        if not isinstance(other, Link):
-            return NotImplemented
-        return self.url < other.url
-
-    def __le__(self, other):
-        if not isinstance(other, Link):
-            return NotImplemented
-        return self.url <= other.url
-
-    def __gt__(self, other):
-        if not isinstance(other, Link):
-            return NotImplemented
-        return self.url > other.url
-
-    def __ge__(self, other):
-        if not isinstance(other, Link):
-            return NotImplemented
-        return self.url >= other.url
-
-    def __hash__(self):
-        return hash(self.url)
-
-    @property
-    def filename(self):
-        _, netloc, path, _, _ = urllib_parse.urlsplit(self.url)
-        name = posixpath.basename(path.rstrip('/')) or netloc
-        name = urllib_parse.unquote(name)
-        assert name, ('URL %r produced no filename' % self.url)
-        return name
-
-    @property
-    def scheme(self):
-        return urllib_parse.urlsplit(self.url)[0]
-
-    @property
-    def netloc(self):
-        return urllib_parse.urlsplit(self.url)[1]
-
-    @property
-    def path(self):
-        return urllib_parse.unquote(urllib_parse.urlsplit(self.url)[2])
-
-    def splitext(self):
-        return splitext(posixpath.basename(self.path.rstrip('/')))
-
-    @property
-    def ext(self):
-        return self.splitext()[1]
-
-    @property
-    def url_without_fragment(self):
-        scheme, netloc, path, query, fragment = urllib_parse.urlsplit(self.url)
-        return urllib_parse.urlunsplit((scheme, netloc, path, query, None))
-
-    _egg_fragment_re = re.compile(r'[#&]egg=([^&]*)')
-
-    @property
-    def egg_fragment(self):
-        match = self._egg_fragment_re.search(self.url)
-        if not match:
-            return None
-        return match.group(1)
-
-    _subdirectory_fragment_re = re.compile(r'[#&]subdirectory=([^&]*)')
-
-    @property
-    def subdirectory_fragment(self):
-        match = self._subdirectory_fragment_re.search(self.url)
-        if not match:
-            return None
-        return match.group(1)
-
-    _hash_re = re.compile(
-        r'(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)'
-    )
-
-    @property
-    def hash(self):
-        match = self._hash_re.search(self.url)
-        if match:
-            return match.group(2)
-        return None
-
-    @property
-    def hash_name(self):
-        match = self._hash_re.search(self.url)
-        if match:
-            return match.group(1)
-        return None
-
-    @property
-    def show_url(self):
-        return posixpath.basename(self.url.split('#', 1)[0].split('?', 1)[0])
-
-    @property
-    def is_wheel(self):
-        return self.ext == wheel_ext
-
-    @property
-    def is_artifact(self):
-        """
-        Determines if this points to an actual artifact (e.g. a tarball) or if
-        it points to an "abstract" thing like a path or a VCS location.
-        """
-        from pip.vcs import vcs
-
-        if self.scheme in vcs.all_schemes:
-            return False
-
-        return True
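
The fragment regexes above make Link handy for pulling metadata out of index URLs — a sketch with a hypothetical URL:

    link = Link('https://example.org/pkg/foo-1.0.tar.gz#egg=foo&sha256=00ff00ff')
    link.egg_fragment            # 'foo'
    link.hash_name, link.hash    # ('sha256', '00ff00ff')
    link.url_without_fragment    # 'https://example.org/pkg/foo-1.0.tar.gz'
    link.filename                # 'foo-1.0.tar.gz'

-
-
-FormatControl = namedtuple('FormatControl', 'no_binary only_binary')
-"""This object has two fields, no_binary and only_binary.
-
-If a field is falsy, it isn't set.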
If it is {':all:'}, it should match all -packages except those listed in the other field. Only one field can be set -to {':all:'} at a time. The rest of the time exact package name matches -are listed, with any given package only showing up in one field at a time. -""" - - -def fmt_ctl_handle_mutual_exclude(value, target, other): - new = value.split(',') - while ':all:' in new: - other.clear() - target.clear() - target.add(':all:') - del new[:new.index(':all:') + 1] - if ':none:' not in new: - # Without a none, we want to discard everything as :all: covers it - return - for name in new: - if name == ':none:': - target.clear() - continue - name = canonicalize_name(name) - other.discard(name) - target.add(name) - - -def fmt_ctl_formats(fmt_ctl, canonical_name): - result = set(["binary", "source"]) - if canonical_name in fmt_ctl.only_binary: - result.discard('source') - elif canonical_name in fmt_ctl.no_binary: - result.discard('binary') - elif ':all:' in fmt_ctl.only_binary: - result.discard('source') - elif ':all:' in fmt_ctl.no_binary: - result.discard('binary') - return frozenset(result) - - -def fmt_ctl_no_binary(fmt_ctl): - fmt_ctl_handle_mutual_exclude( - ':all:', fmt_ctl.no_binary, fmt_ctl.only_binary) - - -def fmt_ctl_no_use_wheel(fmt_ctl): - fmt_ctl_no_binary(fmt_ctl) - warnings.warn( - '--no-use-wheel is deprecated and will be removed in the future. ' - ' Please use --no-binary :all: instead.', RemovedInPip10Warning, - stacklevel=2) - - -Search = namedtuple('Search', 'supplied canonical formats') -"""Capture key aspects of a search. - -:attribute supplied: The user supplied package. -:attribute canonical: The canonical package name. -:attribute formats: The formats allowed for this package. Should be a set - with 'binary' or 'source' or both in it. -""" diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/locations.py b/classifier/myenv/lib/python3.6/site-packages/pip/locations.py deleted file mode 100644 index e598ef1..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/locations.py +++ /dev/null @@ -1,182 +0,0 @@ -"""Locations where we look for configs, install stuff, etc""" -from __future__ import absolute_import - -import os -import os.path -import site -import sys - -from distutils import sysconfig -from distutils.command.install import install, SCHEME_KEYS # noqa - -from pip.compat import WINDOWS, expanduser -from pip.utils import appdirs - - -# Application Directories -USER_CACHE_DIR = appdirs.user_cache_dir("pip") - - -DELETE_MARKER_MESSAGE = '''\ -This file is placed here by pip to indicate the source was put -here by pip. - -Once this package is successfully installed this source code will be -deleted (unless you remove this file). -''' -PIP_DELETE_MARKER_FILENAME = 'pip-delete-this-directory.txt' - - -def write_delete_marker_file(directory): - """ - Write the pip delete marker file into this directory. - """ - filepath = os.path.join(directory, PIP_DELETE_MARKER_FILENAME) - with open(filepath, 'w') as marker_fp: - marker_fp.write(DELETE_MARKER_MESSAGE) - - -def running_under_virtualenv(): - """ - Return True if we're running inside a virtualenv, False otherwise. - - """ - if hasattr(sys, 'real_prefix'): - return True - elif sys.prefix != getattr(sys, "base_prefix", sys.prefix): - return True - - return False - - -def virtualenv_no_global(): - """ - Return True if in a venv and no system site packages. 
- """ - # this mirrors the logic in virtualenv.py for locating the - # no-global-site-packages.txt file - site_mod_dir = os.path.dirname(os.path.abspath(site.__file__)) - no_global_file = os.path.join(site_mod_dir, 'no-global-site-packages.txt') - if running_under_virtualenv() and os.path.isfile(no_global_file): - return True - - -if running_under_virtualenv(): - src_prefix = os.path.join(sys.prefix, 'src') -else: - # FIXME: keep src in cwd for now (it is not a temporary folder) - try: - src_prefix = os.path.join(os.getcwd(), 'src') - except OSError: - # In case the current working directory has been renamed or deleted - sys.exit( - "The folder you are executing pip from can no longer be found." - ) - -# under macOS + virtualenv sys.prefix is not properly resolved -# it is something like /path/to/python/bin/.. -# Note: using realpath due to tmp dirs on OSX being symlinks -src_prefix = os.path.abspath(src_prefix) - -# FIXME doesn't account for venv linked to global site-packages - -site_packages = sysconfig.get_python_lib() -user_site = site.USER_SITE -user_dir = expanduser('~') -if WINDOWS: - bin_py = os.path.join(sys.prefix, 'Scripts') - bin_user = os.path.join(user_site, 'Scripts') - # buildout uses 'bin' on Windows too? - if not os.path.exists(bin_py): - bin_py = os.path.join(sys.prefix, 'bin') - bin_user = os.path.join(user_site, 'bin') - - config_basename = 'pip.ini' - - legacy_storage_dir = os.path.join(user_dir, 'pip') - legacy_config_file = os.path.join( - legacy_storage_dir, - config_basename, - ) -else: - bin_py = os.path.join(sys.prefix, 'bin') - bin_user = os.path.join(user_site, 'bin') - - config_basename = 'pip.conf' - - legacy_storage_dir = os.path.join(user_dir, '.pip') - legacy_config_file = os.path.join( - legacy_storage_dir, - config_basename, - ) - - # Forcing to use /usr/local/bin for standard macOS framework installs - # Also log to ~/Library/Logs/ for use with the Console.app log viewer - if sys.platform[:6] == 'darwin' and sys.prefix[:16] == '/System/Library/': - bin_py = '/usr/local/bin' - -site_config_files = [ - os.path.join(path, config_basename) - for path in appdirs.site_config_dirs('pip') -] - - -def distutils_scheme(dist_name, user=False, home=None, root=None, - isolated=False, prefix=None): - """ - Return a distutils install scheme - """ - from distutils.dist import Distribution - - scheme = {} - - if isolated: - extra_dist_args = {"script_args": ["--no-user-cfg"]} - else: - extra_dist_args = {} - dist_args = {'name': dist_name} - dist_args.update(extra_dist_args) - - d = Distribution(dist_args) - d.parse_config_files() - i = d.get_command_obj('install', create=True) - # NOTE: setting user or home has the side-effect of creating the home dir - # or user base for installations during finalize_options() - # ideally, we'd prefer a scheme class that has no side-effects. - assert not (user and prefix), "user={0} prefix={1}".format(user, prefix) - i.user = user or i.user - if user: - i.prefix = "" - i.prefix = prefix or i.prefix - i.home = home or i.home - i.root = root or i.root - i.finalize_options() - for key in SCHEME_KEYS: - scheme[key] = getattr(i, 'install_' + key) - - # install_lib specified in setup.cfg should install *everything* - # into there (i.e. it takes precedence over both purelib and - # platlib). 
Note, i.install_lib is *always* set after - # finalize_options(); we only want to override here if the user - # has explicitly requested it hence going back to the config - if 'install_lib' in d.get_option_dict('install'): - scheme.update(dict(purelib=i.install_lib, platlib=i.install_lib)) - - if running_under_virtualenv(): - scheme['headers'] = os.path.join( - sys.prefix, - 'include', - 'site', - 'python' + sys.version[:3], - dist_name, - ) - - if root is not None: - path_no_drive = os.path.splitdrive( - os.path.abspath(scheme["headers"]))[1] - scheme["headers"] = os.path.join( - root, - path_no_drive[1:], - ) - - return scheme diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/models/__init__.py b/classifier/myenv/lib/python3.6/site-packages/pip/models/__init__.py deleted file mode 100644 index 1d727d7..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/models/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from pip.models.index import Index, PyPI - - -__all__ = ["Index", "PyPI"] diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/models/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/models/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index a866b74..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/models/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/models/__pycache__/index.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/models/__pycache__/index.cpython-36.pyc deleted file mode 100644 index c2c2277..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/models/__pycache__/index.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/models/index.py b/classifier/myenv/lib/python3.6/site-packages/pip/models/index.py deleted file mode 100644 index be99119..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/models/index.py +++ /dev/null @@ -1,16 +0,0 @@ -from pip._vendor.six.moves.urllib import parse as urllib_parse - - -class Index(object): - def __init__(self, url): - self.url = url - self.netloc = urllib_parse.urlsplit(url).netloc - self.simple_url = self.url_to_path('simple') - self.pypi_url = self.url_to_path('pypi') - self.pip_json_url = self.url_to_path('pypi/pip/json') - - def url_to_path(self, path): - return urllib_parse.urljoin(self.url, path) - - -PyPI = Index('https://pypi.python.org/') diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/operations/__init__.py b/classifier/myenv/lib/python3.6/site-packages/pip/operations/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/operations/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/operations/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index 66e600b..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/operations/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/operations/__pycache__/check.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/operations/__pycache__/check.cpython-36.pyc deleted file mode 100644 index 0741095..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/operations/__pycache__/check.cpython-36.pyc and /dev/null differ diff --git 
a/classifier/myenv/lib/python3.6/site-packages/pip/operations/__pycache__/freeze.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/operations/__pycache__/freeze.cpython-36.pyc deleted file mode 100644 index 12f14fa..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/operations/__pycache__/freeze.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/operations/check.py b/classifier/myenv/lib/python3.6/site-packages/pip/operations/check.py deleted file mode 100644 index 2cf67aa..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/operations/check.py +++ /dev/null @@ -1,49 +0,0 @@ - - -def check_requirements(installed_dists): - missing_reqs_dict = {} - incompatible_reqs_dict = {} - - for dist in installed_dists: - key = '%s==%s' % (dist.project_name, dist.version) - - missing_reqs = list(get_missing_reqs(dist, installed_dists)) - if missing_reqs: - missing_reqs_dict[key] = missing_reqs - - incompatible_reqs = list(get_incompatible_reqs( - dist, installed_dists)) - if incompatible_reqs: - incompatible_reqs_dict[key] = incompatible_reqs - - return (missing_reqs_dict, incompatible_reqs_dict) - - -def get_missing_reqs(dist, installed_dists): - """Return all of the requirements of `dist` that aren't present in - `installed_dists`. - - """ - installed_names = set(d.project_name.lower() for d in installed_dists) - missing_requirements = set() - - for requirement in dist.requires(): - if requirement.project_name.lower() not in installed_names: - missing_requirements.add(requirement) - yield requirement - - -def get_incompatible_reqs(dist, installed_dists): - """Return all of the requirements of `dist` that are present in - `installed_dists`, but have incompatible versions. 
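[Note] The two generators in the check.py being deleted here boil down to a membership test and a version-containment test against the installed working set. A rough standalone equivalent, using the setuptools-provided pkg_resources instead of pip's vendored copy:

    import pkg_resources

    def simple_check():
        installed = {d.project_name.lower(): d
                     for d in pkg_resources.working_set}
        for dist in pkg_resources.working_set:
            for req in dist.requires():
                present = installed.get(req.project_name.lower())
                if present is None:
                    print('%s: missing %s' % (dist.project_name, req))
                elif present not in req:
                    # `dist in requirement` tests version containment.
                    print('%s: has %s %s, requires %s' % (
                        dist.project_name, req.project_name,
                        present.version, req))

    simple_check()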
- - """ - installed_dists_by_name = {} - for installed_dist in installed_dists: - installed_dists_by_name[installed_dist.project_name] = installed_dist - - for requirement in dist.requires(): - present_dist = installed_dists_by_name.get(requirement.project_name) - - if present_dist and present_dist not in requirement: - yield (requirement, present_dist) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/operations/freeze.py b/classifier/myenv/lib/python3.6/site-packages/pip/operations/freeze.py deleted file mode 100644 index 920c2c1..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/operations/freeze.py +++ /dev/null @@ -1,132 +0,0 @@ -from __future__ import absolute_import - -import logging -import re - -import pip -from pip.req import InstallRequirement -from pip.req.req_file import COMMENT_RE -from pip.utils import get_installed_distributions -from pip._vendor import pkg_resources -from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.pkg_resources import RequirementParseError - - -logger = logging.getLogger(__name__) - - -def freeze( - requirement=None, - find_links=None, local_only=None, user_only=None, skip_regex=None, - default_vcs=None, - isolated=False, - wheel_cache=None, - skip=()): - find_links = find_links or [] - skip_match = None - - if skip_regex: - skip_match = re.compile(skip_regex).search - - dependency_links = [] - - for dist in pkg_resources.working_set: - if dist.has_metadata('dependency_links.txt'): - dependency_links.extend( - dist.get_metadata_lines('dependency_links.txt') - ) - for link in find_links: - if '#egg=' in link: - dependency_links.append(link) - for link in find_links: - yield '-f %s' % link - installations = {} - for dist in get_installed_distributions(local_only=local_only, - skip=(), - user_only=user_only): - try: - req = pip.FrozenRequirement.from_dist( - dist, - dependency_links - ) - except RequirementParseError: - logger.warning( - "Could not parse requirement: %s", - dist.project_name - ) - continue - installations[req.name] = req - - if requirement: - # the options that don't get turned into an InstallRequirement - # should only be emitted once, even if the same option is in multiple - # requirements files, so we need to keep track of what has been emitted - # so that we don't emit it again if it's seen again - emitted_options = set() - for req_file_path in requirement: - with open(req_file_path) as req_file: - for line in req_file: - if (not line.strip() or - line.strip().startswith('#') or - (skip_match and skip_match(line)) or - line.startswith(( - '-r', '--requirement', - '-Z', '--always-unzip', - '-f', '--find-links', - '-i', '--index-url', - '--pre', - '--trusted-host', - '--process-dependency-links', - '--extra-index-url'))): - line = line.rstrip() - if line not in emitted_options: - emitted_options.add(line) - yield line - continue - - if line.startswith('-e') or line.startswith('--editable'): - if line.startswith('-e'): - line = line[2:].strip() - else: - line = line[len('--editable'):].strip().lstrip('=') - line_req = InstallRequirement.from_editable( - line, - default_vcs=default_vcs, - isolated=isolated, - wheel_cache=wheel_cache, - ) - else: - line_req = InstallRequirement.from_line( - COMMENT_RE.sub('', line).strip(), - isolated=isolated, - wheel_cache=wheel_cache, - ) - - if not line_req.name: - logger.info( - "Skipping line in requirement file [%s] because " - "it's not clear what it would install: %s", - req_file_path, line.strip(), - ) - logger.info( - " (add #egg=PackageName to 
the URL to avoid" - " this warning)" - ) - elif line_req.name not in installations: - logger.warning( - "Requirement file [%s] contains %s, but that " - "package is not installed", - req_file_path, COMMENT_RE.sub('', line).strip(), - ) - else: - yield str(installations[line_req.name]).rstrip() - del installations[line_req.name] - - yield( - '## The following requirements were added by ' - 'pip freeze:' - ) - for installation in sorted( - installations.values(), key=lambda x: x.name.lower()): - if canonicalize_name(installation.name) not in skip: - yield str(installation).rstrip() diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/pep425tags.py b/classifier/myenv/lib/python3.6/site-packages/pip/pep425tags.py deleted file mode 100644 index ad202ef..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/pep425tags.py +++ /dev/null @@ -1,324 +0,0 @@ -"""Generate and work with PEP 425 Compatibility Tags.""" -from __future__ import absolute_import - -import re -import sys -import warnings -import platform -import logging - -try: - import sysconfig -except ImportError: # pragma nocover - # Python < 2.7 - import distutils.sysconfig as sysconfig -import distutils.util - -from pip.compat import OrderedDict -import pip.utils.glibc - -logger = logging.getLogger(__name__) - -_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)') - - -def get_config_var(var): - try: - return sysconfig.get_config_var(var) - except IOError as e: # Issue #1074 - warnings.warn("{0}".format(e), RuntimeWarning) - return None - - -def get_abbr_impl(): - """Return abbreviated implementation name.""" - if hasattr(sys, 'pypy_version_info'): - pyimpl = 'pp' - elif sys.platform.startswith('java'): - pyimpl = 'jy' - elif sys.platform == 'cli': - pyimpl = 'ip' - else: - pyimpl = 'cp' - return pyimpl - - -def get_impl_ver(): - """Return implementation version.""" - impl_ver = get_config_var("py_version_nodot") - if not impl_ver or get_abbr_impl() == 'pp': - impl_ver = ''.join(map(str, get_impl_version_info())) - return impl_ver - - -def get_impl_version_info(): - """Return sys.version_info-like tuple for use in decrementing the minor - version.""" - if get_abbr_impl() == 'pp': - # as per https://github.com/pypa/pip/issues/2882 - return (sys.version_info[0], sys.pypy_version_info.major, - sys.pypy_version_info.minor) - else: - return sys.version_info[0], sys.version_info[1] - - -def get_impl_tag(): - """ - Returns the Tag for this specific implementation. 
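[Note] For the common CPython case, the implementation tag built from get_abbr_impl() and get_impl_ver() reduces to 'cp' plus the major and minor version digits. A rough sketch; the real code also handles Jython, IronPython and PyPy's own version tuple:

    import sys

    impl = 'pp' if hasattr(sys, 'pypy_version_info') else 'cp'
    impl_tag = '%s%d%d' % (impl, sys.version_info[0], sys.version_info[1])
    print(impl_tag)  # e.g. 'cp36' on the Python 3.6 this venv was built with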
- """ - return "{0}{1}".format(get_abbr_impl(), get_impl_ver()) - - -def get_flag(var, fallback, expected=True, warn=True): - """Use a fallback method for determining SOABI flags if the needed config - var is unset or unavailable.""" - val = get_config_var(var) - if val is None: - if warn: - logger.debug("Config variable '%s' is unset, Python ABI tag may " - "be incorrect", var) - return fallback() - return val == expected - - -def get_abi_tag(): - """Return the ABI tag based on SOABI (if available) or emulate SOABI - (CPython 2, PyPy).""" - soabi = get_config_var('SOABI') - impl = get_abbr_impl() - if not soabi and impl in ('cp', 'pp') and hasattr(sys, 'maxunicode'): - d = '' - m = '' - u = '' - if get_flag('Py_DEBUG', - lambda: hasattr(sys, 'gettotalrefcount'), - warn=(impl == 'cp')): - d = 'd' - if get_flag('WITH_PYMALLOC', - lambda: impl == 'cp', - warn=(impl == 'cp')): - m = 'm' - if get_flag('Py_UNICODE_SIZE', - lambda: sys.maxunicode == 0x10ffff, - expected=4, - warn=(impl == 'cp' and - sys.version_info < (3, 3))) \ - and sys.version_info < (3, 3): - u = 'u' - abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u) - elif soabi and soabi.startswith('cpython-'): - abi = 'cp' + soabi.split('-')[1] - elif soabi: - abi = soabi.replace('.', '_').replace('-', '_') - else: - abi = None - return abi - - -def _is_running_32bit(): - return sys.maxsize == 2147483647 - - -def get_platform(): - """Return our platform name 'win32', 'linux_x86_64'""" - if sys.platform == 'darwin': - # distutils.util.get_platform() returns the release based on the value - # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may - # be significantly older than the user's current machine. - release, _, machine = platform.mac_ver() - split_ver = release.split('.') - - if machine == "x86_64" and _is_running_32bit(): - machine = "i386" - elif machine == "ppc64" and _is_running_32bit(): - machine = "ppc" - - return 'macosx_{0}_{1}_{2}'.format(split_ver[0], split_ver[1], machine) - - # XXX remove distutils dependency - result = distutils.util.get_platform().replace('.', '_').replace('-', '_') - if result == "linux_x86_64" and _is_running_32bit(): - # 32 bit Python program (running on a 64 bit Linux): pip should only - # install and run 32 bit compiled extensions in that case. - result = "linux_i686" - - return result - - -def is_manylinux1_compatible(): - # Only Linux, and only x86-64 / i686 - if get_platform() not in ("linux_x86_64", "linux_i686"): - return False - - # Check for presence of _manylinux module - try: - import _manylinux - return bool(_manylinux.manylinux1_compatible) - except (ImportError, AttributeError): - # Fall through to heuristic check below - pass - - # Check glibc version. CentOS 5 uses glibc 2.5. - return pip.utils.glibc.have_compatible_glibc(2, 5) - - -def get_darwin_arches(major, minor, machine): - """Return a list of supported arches (including group arches) for - the given major, minor and machine architecture of an macOS machine. - """ - arches = [] - - def _supports_arch(major, minor, arch): - # Looking at the application support for macOS versions in the chart - # provided by https://en.wikipedia.org/wiki/OS_X#Versions it appears - # our timeline looks roughly like: - # - # 10.0 - Introduces ppc support. - # 10.4 - Introduces ppc64, i386, and x86_64 support, however the ppc64 - # and x86_64 support is CLI only, and cannot be used for GUI - # applications. - # 10.5 - Extends ppc64 and x86_64 support to cover GUI applications. 
- # 10.6 - Drops support for ppc64 - # 10.7 - Drops support for ppc - # - # Given that we do not know if we're installing a CLI or a GUI - # application, we must be conservative and assume it might be a GUI - # application and behave as if ppc64 and x86_64 support did not occur - # until 10.5. - # - # Note: The above information is taken from the "Application support" - # column in the chart not the "Processor support" since I believe - # that we care about what instruction sets an application can use - # not which processors the OS supports. - if arch == 'ppc': - return (major, minor) <= (10, 5) - if arch == 'ppc64': - return (major, minor) == (10, 5) - if arch == 'i386': - return (major, minor) >= (10, 4) - if arch == 'x86_64': - return (major, minor) >= (10, 5) - if arch in groups: - for garch in groups[arch]: - if _supports_arch(major, minor, garch): - return True - return False - - groups = OrderedDict([ - ("fat", ("i386", "ppc")), - ("intel", ("x86_64", "i386")), - ("fat64", ("x86_64", "ppc64")), - ("fat32", ("x86_64", "i386", "ppc")), - ]) - - if _supports_arch(major, minor, machine): - arches.append(machine) - - for garch in groups: - if machine in groups[garch] and _supports_arch(major, minor, garch): - arches.append(garch) - - arches.append('universal') - - return arches - - -def get_supported(versions=None, noarch=False, platform=None, - impl=None, abi=None): - """Return a list of supported tags for each version specified in - `versions`. - - :param versions: a list of string versions, of the form ["33", "32"], - or None. The first version will be assumed to support our ABI. - :param platform: specify the exact platform you want valid - tags for, or None. If None, use the local system platform. - :param impl: specify the exact implementation you want valid - tags for, or None. If None, use the local interpreter impl. - :param abi: specify the exact abi you want valid - tags for, or None. If None, use the local interpreter abi. - """ - supported = [] - - # Versions must be given with respect to the preference - if versions is None: - versions = [] - version_info = get_impl_version_info() - major = version_info[:-1] - # Support all previous minor Python versions. - for minor in range(version_info[-1], -1, -1): - versions.append(''.join(map(str, major + (minor,)))) - - impl = impl or get_abbr_impl() - - abis = [] - - abi = abi or get_abi_tag() - if abi: - abis[0:0] = [abi] - - abi3s = set() - import imp - for suffix in imp.get_suffixes(): - if suffix[0].startswith('.abi'): - abi3s.add(suffix[0].split('.', 2)[1]) - - abis.extend(sorted(list(abi3s))) - - abis.append('none') - - if not noarch: - arch = platform or get_platform() - if arch.startswith('macosx'): - # support macosx-10.6-intel on macosx-10.9-x86_64 - match = _osx_arch_pat.match(arch) - if match: - name, major, minor, actual_arch = match.groups() - tpl = '{0}_{1}_%i_%s'.format(name, major) - arches = [] - for m in reversed(range(int(minor) + 1)): - for a in get_darwin_arches(int(major), m, actual_arch): - arches.append(tpl % (m, a)) - else: - # arch pattern didn't match (?!) 
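[Note] Outside the macOS special case, get_platform() above is a thin normalisation of distutils.util.get_platform() plus a correction for a 32-bit interpreter running on a 64-bit kernel. A simplified sketch:

    import distutils.util
    import platform
    import sys

    def platform_tag():
        if sys.platform == 'darwin':
            # Use the running machine's version, not the build-time
            # MACOSX_DEPLOYMENT_TARGET (see the comment above).
            release, _, machine = platform.mac_ver()
            major, minor = release.split('.')[:2]
            return 'macosx_%s_%s_%s' % (major, minor, machine)
        result = distutils.util.get_platform()
        result = result.replace('.', '_').replace('-', '_')
        if result == 'linux_x86_64' and sys.maxsize == 2147483647:
            # 32-bit interpreter on a 64-bit Linux kernel.
            result = 'linux_i686'
        return result

    print(platform_tag())  # e.g. 'linux_x86_64'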
- arches = [arch] - elif platform is None and is_manylinux1_compatible(): - arches = [arch.replace('linux', 'manylinux1'), arch] - else: - arches = [arch] - - # Current version, current API (built specifically for our Python): - for abi in abis: - for arch in arches: - supported.append(('%s%s' % (impl, versions[0]), abi, arch)) - - # abi3 modules compatible with older version of Python - for version in versions[1:]: - # abi3 was introduced in Python 3.2 - if version in ('31', '30'): - break - for abi in abi3s: # empty set if not Python 3 - for arch in arches: - supported.append(("%s%s" % (impl, version), abi, arch)) - - # Has binaries, does not use the Python API: - for arch in arches: - supported.append(('py%s' % (versions[0][0]), 'none', arch)) - - # No abi / arch, but requires our implementation: - supported.append(('%s%s' % (impl, versions[0]), 'none', 'any')) - # Tagged specifically as being cross-version compatible - # (with just the major version specified) - supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any')) - - # No abi / arch, generic Python - for i, version in enumerate(versions): - supported.append(('py%s' % (version,), 'none', 'any')) - if i == 0: - supported.append(('py%s' % (version[0]), 'none', 'any')) - - return supported - -supported_tags = get_supported() -supported_tags_noarch = get_supported(noarch=True) - -implementation_tag = get_impl_tag() diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/req/__init__.py b/classifier/myenv/lib/python3.6/site-packages/pip/req/__init__.py deleted file mode 100644 index 00185a4..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/req/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -from __future__ import absolute_import - -from .req_install import InstallRequirement -from .req_set import RequirementSet, Requirements -from .req_file import parse_requirements - -__all__ = [ - "RequirementSet", "Requirements", "InstallRequirement", - "parse_requirements", -] diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/req/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/req/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index 7ef46aa..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/req/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/req/__pycache__/req_file.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/req/__pycache__/req_file.cpython-36.pyc deleted file mode 100644 index a0a8d2e..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/req/__pycache__/req_file.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/req/__pycache__/req_install.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/req/__pycache__/req_install.cpython-36.pyc deleted file mode 100644 index aeb6bc8..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/req/__pycache__/req_install.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/req/__pycache__/req_set.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/req/__pycache__/req_set.cpython-36.pyc deleted file mode 100644 index 5e7e875..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/req/__pycache__/req_set.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/req/__pycache__/req_uninstall.cpython-36.pyc 
b/classifier/myenv/lib/python3.6/site-packages/pip/req/__pycache__/req_uninstall.cpython-36.pyc deleted file mode 100644 index 3d189ad..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/req/__pycache__/req_uninstall.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/req/req_file.py b/classifier/myenv/lib/python3.6/site-packages/pip/req/req_file.py deleted file mode 100644 index 821df22..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/req/req_file.py +++ /dev/null @@ -1,342 +0,0 @@ -""" -Requirements file parsing -""" - -from __future__ import absolute_import - -import os -import re -import shlex -import sys -import optparse -import warnings - -from pip._vendor.six.moves.urllib import parse as urllib_parse -from pip._vendor.six.moves import filterfalse - -import pip -from pip.download import get_file_content -from pip.req.req_install import InstallRequirement -from pip.exceptions import (RequirementsFileParseError) -from pip.utils.deprecation import RemovedInPip10Warning -from pip import cmdoptions - -__all__ = ['parse_requirements'] - -SCHEME_RE = re.compile(r'^(http|https|file):', re.I) -COMMENT_RE = re.compile(r'(^|\s)+#.*$') - -SUPPORTED_OPTIONS = [ - cmdoptions.constraints, - cmdoptions.editable, - cmdoptions.requirements, - cmdoptions.no_index, - cmdoptions.index_url, - cmdoptions.find_links, - cmdoptions.extra_index_url, - cmdoptions.allow_external, - cmdoptions.allow_all_external, - cmdoptions.no_allow_external, - cmdoptions.allow_unsafe, - cmdoptions.no_allow_unsafe, - cmdoptions.use_wheel, - cmdoptions.no_use_wheel, - cmdoptions.always_unzip, - cmdoptions.no_binary, - cmdoptions.only_binary, - cmdoptions.pre, - cmdoptions.process_dependency_links, - cmdoptions.trusted_host, - cmdoptions.require_hashes, -] - -# options to be passed to requirements -SUPPORTED_OPTIONS_REQ = [ - cmdoptions.install_options, - cmdoptions.global_options, - cmdoptions.hash, -] - -# the 'dest' string values -SUPPORTED_OPTIONS_REQ_DEST = [o().dest for o in SUPPORTED_OPTIONS_REQ] - - -def parse_requirements(filename, finder=None, comes_from=None, options=None, - session=None, constraint=False, wheel_cache=None): - """Parse a requirements file and yield InstallRequirement instances. - - :param filename: Path or url of requirements file. - :param finder: Instance of pip.index.PackageFinder. - :param comes_from: Origin description of requirements. - :param options: cli options. - :param session: Instance of pip.download.PipSession. - :param constraint: If true, parsing a constraint file rather than - requirements file. 
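[Note] Typical driver code for the parse_requirements() being deleted here, a pip 9.x internal API whose import paths changed in pip 10; the filename is a placeholder:

    from pip.download import PipSession
    from pip.req import parse_requirements

    for req in parse_requirements('requirements.txt', session=PipSession()):
        # Each item is an InstallRequirement; __str__ handles both named
        # requirements and bare links.
        print(str(req))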
- :param wheel_cache: Instance of pip.wheel.WheelCache - """ - if session is None: - raise TypeError( - "parse_requirements() missing 1 required keyword argument: " - "'session'" - ) - - _, content = get_file_content( - filename, comes_from=comes_from, session=session - ) - - lines_enum = preprocess(content, options) - - for line_number, line in lines_enum: - req_iter = process_line(line, filename, line_number, finder, - comes_from, options, session, wheel_cache, - constraint=constraint) - for req in req_iter: - yield req - - -def preprocess(content, options): - """Split, filter, and join lines, and return a line iterator - - :param content: the content of the requirements file - :param options: cli options - """ - lines_enum = enumerate(content.splitlines(), start=1) - lines_enum = join_lines(lines_enum) - lines_enum = ignore_comments(lines_enum) - lines_enum = skip_regex(lines_enum, options) - return lines_enum - - -def process_line(line, filename, line_number, finder=None, comes_from=None, - options=None, session=None, wheel_cache=None, - constraint=False): - """Process a single requirements line; This can result in creating/yielding - requirements, or updating the finder. - - For lines that contain requirements, the only options that have an effect - are from SUPPORTED_OPTIONS_REQ, and they are scoped to the - requirement. Other options from SUPPORTED_OPTIONS may be present, but are - ignored. - - For lines that do not contain requirements, the only options that have an - effect are from SUPPORTED_OPTIONS. Options from SUPPORTED_OPTIONS_REQ may - be present, but are ignored. These lines may contain multiple options - (although our docs imply only one is supported), and all our parsed and - affect the finder. - - :param constraint: If True, parsing a constraints file. 
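[Note] preprocess() above is a small pipeline of generators over (line_number, line) pairs. A simplified sketch that keeps the comment-stripping and blank-line filtering but omits continuation-line joining and the skip-regex option:

    import re

    COMMENT_RE = re.compile(r'(^|\s)+#.*$')  # same pattern as above

    def simple_preprocess(content):
        for number, line in enumerate(content.splitlines(), start=1):
            line = COMMENT_RE.sub('', line).strip()
            if line:
                yield number, line

    content = 'requests==2.18.0  # pinned\n\n-e ./pkgs/local-lib\n'
    print(list(simple_preprocess(content)))
    # [(1, 'requests==2.18.0'), (3, '-e ./pkgs/local-lib')]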
- :param options: OptionParser options that we may update - """ - parser = build_parser() - defaults = parser.get_default_values() - defaults.index_url = None - if finder: - # `finder.format_control` will be updated during parsing - defaults.format_control = finder.format_control - args_str, options_str = break_args_options(line) - if sys.version_info < (2, 7, 3): - # Prior to 2.7.3, shlex cannot deal with unicode entries - options_str = options_str.encode('utf8') - opts, _ = parser.parse_args(shlex.split(options_str), defaults) - - # preserve for the nested code path - line_comes_from = '%s %s (line %s)' % ( - '-c' if constraint else '-r', filename, line_number) - - # yield a line requirement - if args_str: - isolated = options.isolated_mode if options else False - if options: - cmdoptions.check_install_build_global(options, opts) - # get the options that apply to requirements - req_options = {} - for dest in SUPPORTED_OPTIONS_REQ_DEST: - if dest in opts.__dict__ and opts.__dict__[dest]: - req_options[dest] = opts.__dict__[dest] - yield InstallRequirement.from_line( - args_str, line_comes_from, constraint=constraint, - isolated=isolated, options=req_options, wheel_cache=wheel_cache - ) - - # yield an editable requirement - elif opts.editables: - isolated = options.isolated_mode if options else False - default_vcs = options.default_vcs if options else None - yield InstallRequirement.from_editable( - opts.editables[0], comes_from=line_comes_from, - constraint=constraint, default_vcs=default_vcs, isolated=isolated, - wheel_cache=wheel_cache - ) - - # parse a nested requirements file - elif opts.requirements or opts.constraints: - if opts.requirements: - req_path = opts.requirements[0] - nested_constraint = False - else: - req_path = opts.constraints[0] - nested_constraint = True - # original file is over http - if SCHEME_RE.search(filename): - # do a url join so relative paths work - req_path = urllib_parse.urljoin(filename, req_path) - # original file and nested file are paths - elif not SCHEME_RE.search(req_path): - # do a join so relative paths work - req_path = os.path.join(os.path.dirname(filename), req_path) - # TODO: Why not use `comes_from='-r {} (line {})'` here as well? - parser = parse_requirements( - req_path, finder, comes_from, options, session, - constraint=nested_constraint, wheel_cache=wheel_cache - ) - for req in parser: - yield req - - # percolate hash-checking option upward - elif opts.require_hashes: - options.require_hashes = opts.require_hashes - - # set finder options - elif finder: - if opts.allow_external: - warnings.warn( - "--allow-external has been deprecated and will be removed in " - "the future. Due to changes in the repository protocol, it no " - "longer has any effect.", - RemovedInPip10Warning, - ) - - if opts.allow_all_external: - warnings.warn( - "--allow-all-external has been deprecated and will be removed " - "in the future. Due to changes in the repository protocol, it " - "no longer has any effect.", - RemovedInPip10Warning, - ) - - if opts.allow_unverified: - warnings.warn( - "--allow-unverified has been deprecated and will be removed " - "in the future. 
Due to changes in the repository protocol, it " - "no longer has any effect.", - RemovedInPip10Warning, - ) - - if opts.index_url: - finder.index_urls = [opts.index_url] - if opts.use_wheel is False: - finder.use_wheel = False - pip.index.fmt_ctl_no_use_wheel(finder.format_control) - if opts.no_index is True: - finder.index_urls = [] - if opts.extra_index_urls: - finder.index_urls.extend(opts.extra_index_urls) - if opts.find_links: - # FIXME: it would be nice to keep track of the source - # of the find_links: support a find-links local path - # relative to a requirements file. - value = opts.find_links[0] - req_dir = os.path.dirname(os.path.abspath(filename)) - relative_to_reqs_file = os.path.join(req_dir, value) - if os.path.exists(relative_to_reqs_file): - value = relative_to_reqs_file - finder.find_links.append(value) - if opts.pre: - finder.allow_all_prereleases = True - if opts.process_dependency_links: - finder.process_dependency_links = True - if opts.trusted_hosts: - finder.secure_origins.extend( - ("*", host, "*") for host in opts.trusted_hosts) - - -def break_args_options(line): - """Break up the line into an args and options string. We only want to shlex - (and then optparse) the options, not the args. args can contain markers - which are corrupted by shlex. - """ - tokens = line.split(' ') - args = [] - options = tokens[:] - for token in tokens: - if token.startswith('-') or token.startswith('--'): - break - else: - args.append(token) - options.pop(0) - return ' '.join(args), ' '.join(options) - - -def build_parser(): - """ - Return a parser for parsing requirement lines - """ - parser = optparse.OptionParser(add_help_option=False) - - option_factories = SUPPORTED_OPTIONS + SUPPORTED_OPTIONS_REQ - for option_factory in option_factories: - option = option_factory() - parser.add_option(option) - - # By default optparse sys.exits on parsing errors. We want to wrap - # that in our own exception. - def parser_exit(self, msg): - raise RequirementsFileParseError(msg) - parser.exit = parser_exit - - return parser - - -def join_lines(lines_enum): - """Joins a line ending in '\' with the previous line (except when following - comments). The joined line takes on the index of the first line. - """ - primary_line_number = None - new_line = [] - for line_number, line in lines_enum: - if not line.endswith('\\') or COMMENT_RE.match(line): - if COMMENT_RE.match(line): - # this ensures comments are always matched later - line = ' ' + line - if new_line: - new_line.append(line) - yield primary_line_number, ''.join(new_line) - new_line = [] - else: - yield line_number, line - else: - if not new_line: - primary_line_number = line_number - new_line.append(line.strip('\\')) - - # last line contains \ - if new_line: - yield primary_line_number, ''.join(new_line) - - # TODO: handle space after '\'. - - -def ignore_comments(lines_enum): - """ - Strips comments and filter empty lines. 
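[Note] break_args_options() above exists because shlex would corrupt environment markers in the args part of a line, so only the option tail gets shlex-split. A compact sketch of the same split; the original's extra startswith('--') test is redundant and dropped here:

    def split_args_options(line):
        tokens = line.split(' ')
        args = []
        for token in tokens:
            if token.startswith('-'):
                break
            args.append(token)
        return ' '.join(args), ' '.join(tokens[len(args):])

    print(split_args_options('SomeProject==1.4 --hash=sha256:deadbeef'))
    # ('SomeProject==1.4', '--hash=sha256:deadbeef')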
- """ - for line_number, line in lines_enum: - line = COMMENT_RE.sub('', line) - line = line.strip() - if line: - yield line_number, line - - -def skip_regex(lines_enum, options): - """ - Skip lines that match '--skip-requirements-regex' pattern - - Note: the regex pattern is only built once - """ - skip_regex = options.skip_requirements_regex if options else None - if skip_regex: - pattern = re.compile(skip_regex) - lines_enum = filterfalse( - lambda e: pattern.search(e[1]), - lines_enum) - return lines_enum diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/req/req_install.py b/classifier/myenv/lib/python3.6/site-packages/pip/req/req_install.py deleted file mode 100644 index 1a98f37..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/req/req_install.py +++ /dev/null @@ -1,1204 +0,0 @@ -from __future__ import absolute_import - -import logging -import os -import re -import shutil -import sys -import tempfile -import traceback -import warnings -import zipfile - -from distutils import sysconfig -from distutils.util import change_root -from email.parser import FeedParser - -from pip._vendor import pkg_resources, six -from pip._vendor.packaging import specifiers -from pip._vendor.packaging.markers import Marker -from pip._vendor.packaging.requirements import InvalidRequirement, Requirement -from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.packaging.version import Version, parse as parse_version -from pip._vendor.six.moves import configparser - -import pip.wheel - -from pip.compat import native_str, get_stdlib, WINDOWS -from pip.download import is_url, url_to_path, path_to_url, is_archive_file -from pip.exceptions import ( - InstallationError, UninstallationError, -) -from pip.locations import ( - bin_py, running_under_virtualenv, PIP_DELETE_MARKER_FILENAME, bin_user, -) -from pip.utils import ( - display_path, rmtree, ask_path_exists, backup_dir, is_installable_dir, - dist_in_usersite, dist_in_site_packages, egg_link_path, - call_subprocess, read_text_file, FakeFile, _make_build_dir, ensure_dir, - get_installed_version, normalize_path, dist_is_local, -) - -from pip.utils.hashes import Hashes -from pip.utils.deprecation import RemovedInPip10Warning -from pip.utils.logging import indent_log -from pip.utils.setuptools_build import SETUPTOOLS_SHIM -from pip.utils.ui import open_spinner -from pip.req.req_uninstall import UninstallPathSet -from pip.vcs import vcs -from pip.wheel import move_wheel_files, Wheel - - -logger = logging.getLogger(__name__) - -operators = specifiers.Specifier._operators.keys() - - -def _strip_extras(path): - m = re.match(r'^(.+)(\[[^\]]+\])$', path) - extras = None - if m: - path_no_extras = m.group(1) - extras = m.group(2) - else: - path_no_extras = path - - return path_no_extras, extras - - -def _safe_extras(extras): - return set(pkg_resources.safe_extra(extra) for extra in extras) - - -class InstallRequirement(object): - - def __init__(self, req, comes_from, source_dir=None, editable=False, - link=None, as_egg=False, update=True, - pycompile=True, markers=None, isolated=False, options=None, - wheel_cache=None, constraint=False): - self.extras = () - if isinstance(req, six.string_types): - try: - req = Requirement(req) - except InvalidRequirement: - if os.path.sep in req: - add_msg = "It looks like a path. Does it exist ?" - elif '=' in req and not any(op in req for op in operators): - add_msg = "= is not a valid operator. Did you mean == ?" 
- else: - add_msg = traceback.format_exc() - raise InstallationError( - "Invalid requirement: '%s'\n%s" % (req, add_msg)) - self.extras = _safe_extras(req.extras) - - self.req = req - self.comes_from = comes_from - self.constraint = constraint - self.source_dir = source_dir - self.editable = editable - - self._wheel_cache = wheel_cache - self.link = self.original_link = link - self.as_egg = as_egg - if markers is not None: - self.markers = markers - else: - self.markers = req and req.marker - self._egg_info_path = None - # This holds the pkg_resources.Distribution object if this requirement - # is already available: - self.satisfied_by = None - # This hold the pkg_resources.Distribution object if this requirement - # conflicts with another installed distribution: - self.conflicts_with = None - # Temporary build location - self._temp_build_dir = None - # Used to store the global directory where the _temp_build_dir should - # have been created. Cf _correct_build_location method. - self._ideal_build_dir = None - # True if the editable should be updated: - self.update = update - # Set to True after successful installation - self.install_succeeded = None - # UninstallPathSet of uninstalled distribution (for possible rollback) - self.uninstalled = None - # Set True if a legitimate do-nothing-on-uninstall has happened - e.g. - # system site packages, stdlib packages. - self.nothing_to_uninstall = False - self.use_user_site = False - self.target_dir = None - self.options = options if options else {} - self.pycompile = pycompile - # Set to True after successful preparation of this requirement - self.prepared = False - - self.isolated = isolated - - @classmethod - def from_editable(cls, editable_req, comes_from=None, default_vcs=None, - isolated=False, options=None, wheel_cache=None, - constraint=False): - from pip.index import Link - - name, url, extras_override = parse_editable( - editable_req, default_vcs) - if url.startswith('file:'): - source_dir = url_to_path(url) - else: - source_dir = None - - res = cls(name, comes_from, source_dir=source_dir, - editable=True, - link=Link(url), - constraint=constraint, - isolated=isolated, - options=options if options else {}, - wheel_cache=wheel_cache) - - if extras_override is not None: - res.extras = _safe_extras(extras_override) - - return res - - @classmethod - def from_line( - cls, name, comes_from=None, isolated=False, options=None, - wheel_cache=None, constraint=False): - """Creates an InstallRequirement from a name, which might be a - requirement, directory containing 'setup.py', filename, or URL. - """ - from pip.index import Link - - if is_url(name): - marker_sep = '; ' - else: - marker_sep = ';' - if marker_sep in name: - name, markers = name.split(marker_sep, 1) - markers = markers.strip() - if not markers: - markers = None - else: - markers = Marker(markers) - else: - markers = None - name = name.strip() - req = None - path = os.path.normpath(os.path.abspath(name)) - link = None - extras = None - - if is_url(name): - link = Link(name) - else: - p, extras = _strip_extras(path) - if (os.path.isdir(p) and - (os.path.sep in name or name.startswith('.'))): - - if not is_installable_dir(p): - raise InstallationError( - "Directory %r is not installable. File 'setup.py' " - "not found." 
% name - ) - link = Link(path_to_url(p)) - elif is_archive_file(p): - if not os.path.isfile(p): - logger.warning( - 'Requirement %r looks like a filename, but the ' - 'file does not exist', - name - ) - link = Link(path_to_url(p)) - - # it's a local file, dir, or url - if link: - # Handle relative file URLs - if link.scheme == 'file' and re.search(r'\.\./', link.url): - link = Link( - path_to_url(os.path.normpath(os.path.abspath(link.path)))) - # wheel file - if link.is_wheel: - wheel = Wheel(link.filename) # can raise InvalidWheelFilename - req = "%s==%s" % (wheel.name, wheel.version) - else: - # set the req to the egg fragment. when it's not there, this - # will become an 'unnamed' requirement - req = link.egg_fragment - - # a requirement specifier - else: - req = name - - options = options if options else {} - res = cls(req, comes_from, link=link, markers=markers, - isolated=isolated, options=options, - wheel_cache=wheel_cache, constraint=constraint) - - if extras: - res.extras = _safe_extras( - Requirement('placeholder' + extras).extras) - - return res - - def __str__(self): - if self.req: - s = str(self.req) - if self.link: - s += ' from %s' % self.link.url - else: - s = self.link.url if self.link else None - if self.satisfied_by is not None: - s += ' in %s' % display_path(self.satisfied_by.location) - if self.comes_from: - if isinstance(self.comes_from, six.string_types): - comes_from = self.comes_from - else: - comes_from = self.comes_from.from_path() - if comes_from: - s += ' (from %s)' % comes_from - return s - - def __repr__(self): - return '<%s object: %s editable=%r>' % ( - self.__class__.__name__, str(self), self.editable) - - def populate_link(self, finder, upgrade, require_hashes): - """Ensure that if a link can be found for this, that it is found. - - Note that self.link may still be None - if Upgrade is False and the - requirement is already installed. - - If require_hashes is True, don't use the wheel cache, because cached - wheels, always built locally, have different hashes than the files - downloaded from the index server and thus throw false hash mismatches. - Furthermore, cached wheels at present have undeterministic contents due - to file modification times. - """ - if self.link is None: - self.link = finder.find_requirement(self, upgrade) - if self._wheel_cache is not None and not require_hashes: - old_link = self.link - self.link = self._wheel_cache.cached_wheel(self.link, self.name) - if old_link != self.link: - logger.debug('Using cached wheel link: %s', self.link) - - @property - def specifier(self): - return self.req.specifier - - @property - def is_pinned(self): - """Return whether I am pinned to an exact version. - - For example, some-package==1.2 is pinned; some-package>1.2 is not. 
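[Note] The is_pinned test above, restated directly against the vendored packaging library; the behaviour should match for plain specifier strings:

    from pip._vendor.packaging.specifiers import SpecifierSet

    def is_pinned(spec):
        specs = SpecifierSet(spec)
        return (len(specs) == 1 and
                next(iter(specs)).operator in ('==', '==='))

    print(is_pinned('==1.2'))       # True
    print(is_pinned('>=1.2,<2.0'))  # False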
- """ - specifiers = self.specifier - return (len(specifiers) == 1 and - next(iter(specifiers)).operator in ('==', '===')) - - def from_path(self): - if self.req is None: - return None - s = str(self.req) - if self.comes_from: - if isinstance(self.comes_from, six.string_types): - comes_from = self.comes_from - else: - comes_from = self.comes_from.from_path() - if comes_from: - s += '->' + comes_from - return s - - def build_location(self, build_dir): - if self._temp_build_dir is not None: - return self._temp_build_dir - if self.req is None: - # for requirement via a path to a directory: the name of the - # package is not available yet so we create a temp directory - # Once run_egg_info will have run, we'll be able - # to fix it via _correct_build_location - # Some systems have /tmp as a symlink which confuses custom - # builds (such as numpy). Thus, we ensure that the real path - # is returned. - self._temp_build_dir = os.path.realpath( - tempfile.mkdtemp('-build', 'pip-') - ) - self._ideal_build_dir = build_dir - return self._temp_build_dir - if self.editable: - name = self.name.lower() - else: - name = self.name - # FIXME: Is there a better place to create the build_dir? (hg and bzr - # need this) - if not os.path.exists(build_dir): - logger.debug('Creating directory %s', build_dir) - _make_build_dir(build_dir) - return os.path.join(build_dir, name) - - def _correct_build_location(self): - """Move self._temp_build_dir to self._ideal_build_dir/self.req.name - - For some requirements (e.g. a path to a directory), the name of the - package is not available until we run egg_info, so the build_location - will return a temporary directory and store the _ideal_build_dir. - - This is only called by self.egg_info_path to fix the temporary build - directory. - """ - if self.source_dir is not None: - return - assert self.req is not None - assert self._temp_build_dir - assert self._ideal_build_dir - old_location = self._temp_build_dir - self._temp_build_dir = None - new_location = self.build_location(self._ideal_build_dir) - if os.path.exists(new_location): - raise InstallationError( - 'A package already exists in %s; please remove it to continue' - % display_path(new_location)) - logger.debug( - 'Moving package %s from %s to new location %s', - self, display_path(old_location), display_path(new_location), - ) - shutil.move(old_location, new_location) - self._temp_build_dir = new_location - self._ideal_build_dir = None - self.source_dir = new_location - self._egg_info_path = None - - @property - def name(self): - if self.req is None: - return None - return native_str(pkg_resources.safe_name(self.req.name)) - - @property - def setup_py_dir(self): - return os.path.join( - self.source_dir, - self.link and self.link.subdirectory_fragment or '') - - @property - def setup_py(self): - assert self.source_dir, "No source dir for %s" % self - try: - import setuptools # noqa - except ImportError: - if get_installed_version('setuptools') is None: - add_msg = "Please install setuptools." 
- else: - add_msg = traceback.format_exc() - # Setuptools is not available - raise InstallationError( - "Could not import setuptools which is required to " - "install from a source distribution.\n%s" % add_msg - ) - - setup_py = os.path.join(self.setup_py_dir, 'setup.py') - - # Python2 __file__ should not be unicode - if six.PY2 and isinstance(setup_py, six.text_type): - setup_py = setup_py.encode(sys.getfilesystemencoding()) - - return setup_py - - def run_egg_info(self): - assert self.source_dir - if self.name: - logger.debug( - 'Running setup.py (path:%s) egg_info for package %s', - self.setup_py, self.name, - ) - else: - logger.debug( - 'Running setup.py (path:%s) egg_info for package from %s', - self.setup_py, self.link, - ) - - with indent_log(): - script = SETUPTOOLS_SHIM % self.setup_py - base_cmd = [sys.executable, '-c', script] - if self.isolated: - base_cmd += ["--no-user-cfg"] - egg_info_cmd = base_cmd + ['egg_info'] - # We can't put the .egg-info files at the root, because then the - # source code will be mistaken for an installed egg, causing - # problems - if self.editable: - egg_base_option = [] - else: - egg_info_dir = os.path.join(self.setup_py_dir, 'pip-egg-info') - ensure_dir(egg_info_dir) - egg_base_option = ['--egg-base', 'pip-egg-info'] - call_subprocess( - egg_info_cmd + egg_base_option, - cwd=self.setup_py_dir, - show_stdout=False, - command_desc='python setup.py egg_info') - - if not self.req: - if isinstance(parse_version(self.pkg_info()["Version"]), Version): - op = "==" - else: - op = "===" - self.req = Requirement( - "".join([ - self.pkg_info()["Name"], - op, - self.pkg_info()["Version"], - ]) - ) - self._correct_build_location() - else: - metadata_name = canonicalize_name(self.pkg_info()["Name"]) - if canonicalize_name(self.req.name) != metadata_name: - logger.warning( - 'Running setup.py (path:%s) egg_info for package %s ' - 'produced metadata for project name %s. Fix your ' - '#egg=%s fragments.', - self.setup_py, self.name, metadata_name, self.name - ) - self.req = Requirement(metadata_name) - - def egg_info_data(self, filename): - if self.satisfied_by is not None: - if not self.satisfied_by.has_metadata(filename): - return None - return self.satisfied_by.get_metadata(filename) - assert self.source_dir - filename = self.egg_info_path(filename) - if not os.path.exists(filename): - return None - data = read_text_file(filename) - return data - - def egg_info_path(self, filename): - if self._egg_info_path is None: - if self.editable: - base = self.source_dir - else: - base = os.path.join(self.setup_py_dir, 'pip-egg-info') - filenames = os.listdir(base) - if self.editable: - filenames = [] - for root, dirs, files in os.walk(base): - for dir in vcs.dirnames: - if dir in dirs: - dirs.remove(dir) - # Iterate over a copy of ``dirs``, since mutating - # a list while iterating over it can cause trouble. - # (See https://github.com/pypa/pip/pull/462.) 
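[Note] The egg_info command that run_egg_info() above assembles, shown in isolation. SETUPTOOLS_SHIM comes from pip.utils.setuptools_build (imported at the top of this deleted file) and injects a setuptools import so that distutils-only setup scripts still expose an egg_info command; the setup.py path below is hypothetical:

    import sys
    from pip.utils.setuptools_build import SETUPTOOLS_SHIM

    setup_py = '/tmp/pip-demo-build/setup.py'  # hypothetical path
    cmd = [sys.executable, '-c', SETUPTOOLS_SHIM % setup_py,
           'egg_info', '--egg-base', 'pip-egg-info']
    print(cmd)  # pip runs this via call_subprocess(cwd=setup_py_dir)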
- for dir in list(dirs): - # Don't search in anything that looks like a virtualenv - # environment - if ( - os.path.lexists( - os.path.join(root, dir, 'bin', 'python') - ) or - os.path.exists( - os.path.join( - root, dir, 'Scripts', 'Python.exe' - ) - )): - dirs.remove(dir) - # Also don't search through tests - elif dir == 'test' or dir == 'tests': - dirs.remove(dir) - filenames.extend([os.path.join(root, dir) - for dir in dirs]) - filenames = [f for f in filenames if f.endswith('.egg-info')] - - if not filenames: - raise InstallationError( - 'No files/directories in %s (from %s)' % (base, filename) - ) - assert filenames, \ - "No files/directories in %s (from %s)" % (base, filename) - - # if we have more than one match, we pick the toplevel one. This - # can easily be the case if there is a dist folder which contains - # an extracted tarball for testing purposes. - if len(filenames) > 1: - filenames.sort( - key=lambda x: x.count(os.path.sep) + - (os.path.altsep and x.count(os.path.altsep) or 0) - ) - self._egg_info_path = os.path.join(base, filenames[0]) - return os.path.join(self._egg_info_path, filename) - - def pkg_info(self): - p = FeedParser() - data = self.egg_info_data('PKG-INFO') - if not data: - logger.warning( - 'No PKG-INFO file found in %s', - display_path(self.egg_info_path('PKG-INFO')), - ) - p.feed(data or '') - return p.close() - - _requirements_section_re = re.compile(r'\[(.*?)\]') - - @property - def installed_version(self): - return get_installed_version(self.name) - - def assert_source_matches_version(self): - assert self.source_dir - version = self.pkg_info()['version'] - if self.req.specifier and version not in self.req.specifier: - logger.warning( - 'Requested %s, but installing version %s', - self, - self.installed_version, - ) - else: - logger.debug( - 'Source in %s has version %s, which satisfies requirement %s', - display_path(self.source_dir), - version, - self, - ) - - def update_editable(self, obtain=True): - if not self.link: - logger.debug( - "Cannot update repository at %s; repository location is " - "unknown", - self.source_dir, - ) - return - assert self.editable - assert self.source_dir - if self.link.scheme == 'file': - # Static paths don't get updated - return - assert '+' in self.link.url, "bad url: %r" % self.link.url - if not self.update: - return - vc_type, url = self.link.url.split('+', 1) - backend = vcs.get_backend(vc_type) - if backend: - vcs_backend = backend(self.link.url) - if obtain: - vcs_backend.obtain(self.source_dir) - else: - vcs_backend.export(self.source_dir) - else: - assert 0, ( - 'Unexpected version control type (in %s): %s' - % (self.link, vc_type)) - - def uninstall(self, auto_confirm=False): - """ - Uninstall the distribution currently satisfying this requirement. - - Prompts before removing or modifying files unless - ``auto_confirm`` is True. - - Refuses to delete or modify files outside of ``sys.prefix`` - - thus uninstallation within a virtual environment can only - modify that virtual environment, even if the virtualenv is - linked to global site-packages. 
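[Note] The refusal of uninstall() to touch files outside the environment rests on comparing the distribution's location against sys.prefix. A simplified sketch of that locality check; pip's real dist_is_local() in pip.utils goes through its own normalize_path helpers:

    import os
    import sys
    import pkg_resources

    def roughly_local(dist):
        prefix = os.path.normcase(os.path.realpath(sys.prefix))
        location = os.path.normcase(os.path.realpath(dist.location))
        return location.startswith(prefix)

    dist = pkg_resources.get_distribution('pip')
    print(dist.location, roughly_local(dist))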
- - """ - if not self.check_if_exists(): - raise UninstallationError( - "Cannot uninstall requirement %s, not installed" % (self.name,) - ) - dist = self.satisfied_by or self.conflicts_with - - dist_path = normalize_path(dist.location) - if not dist_is_local(dist): - logger.info( - "Not uninstalling %s at %s, outside environment %s", - dist.key, - dist_path, - sys.prefix, - ) - self.nothing_to_uninstall = True - return - - if dist_path in get_stdlib(): - logger.info( - "Not uninstalling %s at %s, as it is in the standard library.", - dist.key, - dist_path, - ) - self.nothing_to_uninstall = True - return - - paths_to_remove = UninstallPathSet(dist) - develop_egg_link = egg_link_path(dist) - develop_egg_link_egg_info = '{0}.egg-info'.format( - pkg_resources.to_filename(dist.project_name)) - egg_info_exists = dist.egg_info and os.path.exists(dist.egg_info) - # Special case for distutils installed package - distutils_egg_info = getattr(dist._provider, 'path', None) - - # Uninstall cases order do matter as in the case of 2 installs of the - # same package, pip needs to uninstall the currently detected version - if (egg_info_exists and dist.egg_info.endswith('.egg-info') and - not dist.egg_info.endswith(develop_egg_link_egg_info)): - # if dist.egg_info.endswith(develop_egg_link_egg_info), we - # are in fact in the develop_egg_link case - paths_to_remove.add(dist.egg_info) - if dist.has_metadata('installed-files.txt'): - for installed_file in dist.get_metadata( - 'installed-files.txt').splitlines(): - path = os.path.normpath( - os.path.join(dist.egg_info, installed_file) - ) - paths_to_remove.add(path) - # FIXME: need a test for this elif block - # occurs with --single-version-externally-managed/--record outside - # of pip - elif dist.has_metadata('top_level.txt'): - if dist.has_metadata('namespace_packages.txt'): - namespaces = dist.get_metadata('namespace_packages.txt') - else: - namespaces = [] - for top_level_pkg in [ - p for p - in dist.get_metadata('top_level.txt').splitlines() - if p and p not in namespaces]: - path = os.path.join(dist.location, top_level_pkg) - paths_to_remove.add(path) - paths_to_remove.add(path + '.py') - paths_to_remove.add(path + '.pyc') - paths_to_remove.add(path + '.pyo') - - elif distutils_egg_info: - warnings.warn( - "Uninstalling a distutils installed project ({0}) has been " - "deprecated and will be removed in a future version. This is " - "due to the fact that uninstalling a distutils project will " - "only partially uninstall the project.".format(self.name), - RemovedInPip10Warning, - ) - paths_to_remove.add(distutils_egg_info) - - elif dist.location.endswith('.egg'): - # package installed by easy_install - # We cannot match on dist.egg_name because it can slightly vary - # i.e. 
setuptools-0.6c11-py2.6.egg vs setuptools-0.6rc11-py2.6.egg - paths_to_remove.add(dist.location) - easy_install_egg = os.path.split(dist.location)[1] - easy_install_pth = os.path.join(os.path.dirname(dist.location), - 'easy-install.pth') - paths_to_remove.add_pth(easy_install_pth, './' + easy_install_egg) - - elif egg_info_exists and dist.egg_info.endswith('.dist-info'): - for path in pip.wheel.uninstallation_paths(dist): - paths_to_remove.add(path) - - elif develop_egg_link: - # develop egg - with open(develop_egg_link, 'r') as fh: - link_pointer = os.path.normcase(fh.readline().strip()) - assert (link_pointer == dist.location), ( - 'Egg-link %s does not match installed location of %s ' - '(at %s)' % (link_pointer, self.name, dist.location) - ) - paths_to_remove.add(develop_egg_link) - easy_install_pth = os.path.join(os.path.dirname(develop_egg_link), - 'easy-install.pth') - paths_to_remove.add_pth(easy_install_pth, dist.location) - - else: - logger.debug( - 'Not sure how to uninstall: %s - Check: %s', - dist, dist.location) - - # find distutils scripts= scripts - if dist.has_metadata('scripts') and dist.metadata_isdir('scripts'): - for script in dist.metadata_listdir('scripts'): - if dist_in_usersite(dist): - bin_dir = bin_user - else: - bin_dir = bin_py - paths_to_remove.add(os.path.join(bin_dir, script)) - if WINDOWS: - paths_to_remove.add(os.path.join(bin_dir, script) + '.bat') - - # find console_scripts - if dist.has_metadata('entry_points.txt'): - if six.PY2: - options = {} - else: - options = {"delimiters": ('=', )} - config = configparser.SafeConfigParser(**options) - config.readfp( - FakeFile(dist.get_metadata_lines('entry_points.txt')) - ) - if config.has_section('console_scripts'): - for name, value in config.items('console_scripts'): - if dist_in_usersite(dist): - bin_dir = bin_user - else: - bin_dir = bin_py - paths_to_remove.add(os.path.join(bin_dir, name)) - if WINDOWS: - paths_to_remove.add( - os.path.join(bin_dir, name) + '.exe' - ) - paths_to_remove.add( - os.path.join(bin_dir, name) + '.exe.manifest' - ) - paths_to_remove.add( - os.path.join(bin_dir, name) + '-script.py' - ) - - paths_to_remove.remove(auto_confirm) - self.uninstalled = paths_to_remove - - def rollback_uninstall(self): - if self.uninstalled: - self.uninstalled.rollback() - else: - logger.error( - "Can't rollback %s, nothing uninstalled.", self.name, - ) - - def commit_uninstall(self): - if self.uninstalled: - self.uninstalled.commit() - elif not self.nothing_to_uninstall: - logger.error( - "Can't commit %s, nothing uninstalled.", self.name, - ) - - def archive(self, build_dir): - assert self.source_dir - create_archive = True - archive_name = '%s-%s.zip' % (self.name, self.pkg_info()["version"]) - archive_path = os.path.join(build_dir, archive_name) - if os.path.exists(archive_path): - response = ask_path_exists( - 'The file %s exists. 
(i)gnore, (w)ipe, (b)ackup, (a)bort ' % - display_path(archive_path), ('i', 'w', 'b', 'a')) - if response == 'i': - create_archive = False - elif response == 'w': - logger.warning('Deleting %s', display_path(archive_path)) - os.remove(archive_path) - elif response == 'b': - dest_file = backup_dir(archive_path) - logger.warning( - 'Backing up %s to %s', - display_path(archive_path), - display_path(dest_file), - ) - shutil.move(archive_path, dest_file) - elif response == 'a': - sys.exit(-1) - if create_archive: - zip = zipfile.ZipFile( - archive_path, 'w', zipfile.ZIP_DEFLATED, - allowZip64=True - ) - dir = os.path.normcase(os.path.abspath(self.setup_py_dir)) - for dirpath, dirnames, filenames in os.walk(dir): - if 'pip-egg-info' in dirnames: - dirnames.remove('pip-egg-info') - for dirname in dirnames: - dirname = os.path.join(dirpath, dirname) - name = self._clean_zip_name(dirname, dir) - zipdir = zipfile.ZipInfo(self.name + '/' + name + '/') - zipdir.external_attr = 0x1ED << 16 # 0o755 - zip.writestr(zipdir, '') - for filename in filenames: - if filename == PIP_DELETE_MARKER_FILENAME: - continue - filename = os.path.join(dirpath, filename) - name = self._clean_zip_name(filename, dir) - zip.write(filename, self.name + '/' + name) - zip.close() - logger.info('Saved %s', display_path(archive_path)) - - def _clean_zip_name(self, name, prefix): - assert name.startswith(prefix + os.path.sep), ( - "name %r doesn't start with prefix %r" % (name, prefix) - ) - name = name[len(prefix) + 1:] - name = name.replace(os.path.sep, '/') - return name - - def match_markers(self, extras_requested=None): - if not extras_requested: - # Provide an extra to safely evaluate the markers - # without matching any extra - extras_requested = ('',) - if self.markers is not None: - return any( - self.markers.evaluate({'extra': extra}) - for extra in extras_requested) - else: - return True - - def install(self, install_options, global_options=[], root=None, - prefix=None): - if self.editable: - self.install_editable( - install_options, global_options, prefix=prefix) - return - if self.is_wheel: - version = pip.wheel.wheel_version(self.source_dir) - pip.wheel.check_compatibility(version, self.name) - - self.move_wheel_files(self.source_dir, root=root, prefix=prefix) - self.install_succeeded = True - return - - # Extend the list of global and install options passed on to - # the setup.py call with the ones from the requirements file. - # Options specified in requirements file override those - # specified on the command line, since the last option given - # to setup.py is the one that is used. 
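# A minimal sketch (not part of the deleted file) of the environment-marker
# evaluation that match_markers() above relies on. pip vendors the
# 'packaging' library, whose Marker.evaluate() accepts an environment dict,
# so supplying an 'extra' key lets a marker such as 'extra == "test"' be
# checked safely even when no extra was requested.
from pip._vendor.packaging.markers import Marker

marker = Marker('extra == "test"')
print(marker.evaluate({'extra': 'test'}))  # True  -> requirement applies
print(marker.evaluate({'extra': ''}))      # False -> skipped when no extra requested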
-        global_options += self.options.get('global_options', [])
-        install_options += self.options.get('install_options', [])
-
-        if self.isolated:
-            global_options = list(global_options) + ["--no-user-cfg"]
-
-        temp_location = tempfile.mkdtemp('-record', 'pip-')
-        record_filename = os.path.join(temp_location, 'install-record.txt')
-        try:
-            install_args = self.get_install_args(
-                global_options, record_filename, root, prefix)
-            msg = 'Running setup.py install for %s' % (self.name,)
-            with open_spinner(msg) as spinner:
-                with indent_log():
-                    call_subprocess(
-                        install_args + install_options,
-                        cwd=self.setup_py_dir,
-                        show_stdout=False,
-                        spinner=spinner,
-                    )
-
-            if not os.path.exists(record_filename):
-                logger.debug('Record file %s not found', record_filename)
-                return
-            self.install_succeeded = True
-            if self.as_egg:
-                # there's no --always-unzip option we can pass to the install
-                # command, so we are unable to save the installed-files.txt
-                return
-
-            def prepend_root(path):
-                if root is None or not os.path.isabs(path):
-                    return path
-                else:
-                    return change_root(root, path)
-
-            with open(record_filename) as f:
-                for line in f:
-                    directory = os.path.dirname(line)
-                    if directory.endswith('.egg-info'):
-                        egg_info_dir = prepend_root(directory)
-                        break
-                else:
-                    logger.warning(
-                        'Could not find .egg-info directory in install record'
-                        ' for %s',
-                        self,
-                    )
-                    # FIXME: put the record somewhere
-                    # FIXME: should this be an error?
-                    return
-            new_lines = []
-            with open(record_filename) as f:
-                for line in f:
-                    filename = line.strip()
-                    if os.path.isdir(filename):
-                        filename += os.path.sep
-                    new_lines.append(
-                        os.path.relpath(
-                            prepend_root(filename), egg_info_dir)
-                    )
-            inst_files_path = os.path.join(egg_info_dir, 'installed-files.txt')
-            with open(inst_files_path, 'w') as f:
-                f.write('\n'.join(new_lines) + '\n')
-        finally:
-            if os.path.exists(record_filename):
-                os.remove(record_filename)
-            rmtree(temp_location)
-
-    def ensure_has_source_dir(self, parent_dir):
-        """Ensure that a source_dir is set.
-
-        This will create a temporary build dir if the name of the requirement
-        isn't known yet.
-
-        :param parent_dir: The ideal pip parent_dir for the source_dir.
-            Generally src_dir for editables and build_dir for sdists.
- :return: self.source_dir - """ - if self.source_dir is None: - self.source_dir = self.build_location(parent_dir) - return self.source_dir - - def get_install_args(self, global_options, record_filename, root, prefix): - install_args = [sys.executable, "-u"] - install_args.append('-c') - install_args.append(SETUPTOOLS_SHIM % self.setup_py) - install_args += list(global_options) + \ - ['install', '--record', record_filename] - - if not self.as_egg: - install_args += ['--single-version-externally-managed'] - - if root is not None: - install_args += ['--root', root] - if prefix is not None: - install_args += ['--prefix', prefix] - - if self.pycompile: - install_args += ["--compile"] - else: - install_args += ["--no-compile"] - - if running_under_virtualenv(): - py_ver_str = 'python' + sysconfig.get_python_version() - install_args += ['--install-headers', - os.path.join(sys.prefix, 'include', 'site', - py_ver_str, self.name)] - - return install_args - - def remove_temporary_source(self): - """Remove the source files from this requirement, if they are marked - for deletion""" - if self.source_dir and os.path.exists( - os.path.join(self.source_dir, PIP_DELETE_MARKER_FILENAME)): - logger.debug('Removing source in %s', self.source_dir) - rmtree(self.source_dir) - self.source_dir = None - if self._temp_build_dir and os.path.exists(self._temp_build_dir): - rmtree(self._temp_build_dir) - self._temp_build_dir = None - - def install_editable(self, install_options, - global_options=(), prefix=None): - logger.info('Running setup.py develop for %s', self.name) - - if self.isolated: - global_options = list(global_options) + ["--no-user-cfg"] - - if prefix: - prefix_param = ['--prefix={0}'.format(prefix)] - install_options = list(install_options) + prefix_param - - with indent_log(): - # FIXME: should we do --install-headers here too? - call_subprocess( - [ - sys.executable, - '-c', - SETUPTOOLS_SHIM % self.setup_py - ] + - list(global_options) + - ['develop', '--no-deps'] + - list(install_options), - - cwd=self.setup_py_dir, - show_stdout=False) - - self.install_succeeded = True - - def check_if_exists(self): - """Find an installed distribution that satisfies or conflicts - with this requirement, and set self.satisfied_by or - self.conflicts_with appropriately. - """ - if self.req is None: - return False - try: - # get_distribution() will resolve the entire list of requirements - # anyway, and we've already determined that we need the requirement - # in question, so strip the marker so that we don't try to - # evaluate it. 
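# A small illustration (not part of the deleted file) of the marker stripping
# done just below: a requirement's environment marker is removed before the
# string is handed to pkg_resources.get_distribution(), which expects a plain
# requirement. The Requirement class shown here is the vendored 'packaging'
# one that pip uses.
from pip._vendor.packaging.requirements import Requirement

req = Requirement('pip>=9; python_version >= "2.7"')
req.marker = None
print(str(req))  # 'pip>=9' -> safe to pass to pkg_resources.get_distribution()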
- no_marker = Requirement(str(self.req)) - no_marker.marker = None - self.satisfied_by = pkg_resources.get_distribution(str(no_marker)) - if self.editable and self.satisfied_by: - self.conflicts_with = self.satisfied_by - # when installing editables, nothing pre-existing should ever - # satisfy - self.satisfied_by = None - return True - except pkg_resources.DistributionNotFound: - return False - except pkg_resources.VersionConflict: - existing_dist = pkg_resources.get_distribution( - self.req.name - ) - if self.use_user_site: - if dist_in_usersite(existing_dist): - self.conflicts_with = existing_dist - elif (running_under_virtualenv() and - dist_in_site_packages(existing_dist)): - raise InstallationError( - "Will not install to the user site because it will " - "lack sys.path precedence to %s in %s" % - (existing_dist.project_name, existing_dist.location) - ) - else: - self.conflicts_with = existing_dist - return True - - @property - def is_wheel(self): - return self.link and self.link.is_wheel - - def move_wheel_files(self, wheeldir, root=None, prefix=None): - move_wheel_files( - self.name, self.req, wheeldir, - user=self.use_user_site, - home=self.target_dir, - root=root, - prefix=prefix, - pycompile=self.pycompile, - isolated=self.isolated, - ) - - def get_dist(self): - """Return a pkg_resources.Distribution built from self.egg_info_path""" - egg_info = self.egg_info_path('').rstrip('/') - base_dir = os.path.dirname(egg_info) - metadata = pkg_resources.PathMetadata(base_dir, egg_info) - dist_name = os.path.splitext(os.path.basename(egg_info))[0] - return pkg_resources.Distribution( - os.path.dirname(egg_info), - project_name=dist_name, - metadata=metadata) - - @property - def has_hash_options(self): - """Return whether any known-good hashes are specified as options. - - These activate --require-hashes mode; hashes specified as part of a - URL do not. - - """ - return bool(self.options.get('hashes', {})) - - def hashes(self, trust_internet=True): - """Return a hash-comparer that considers my option- and URL-based - hashes to be known-good. - - Hashes in URLs--ones embedded in the requirements file, not ones - downloaded from an index server--are almost peers with ones from - flags. They satisfy --require-hashes (whether it was implicitly or - explicitly activated) but do not activate it. md5 and sha224 are not - allowed in flags, which should nudge people toward good algos. We - always OR all hashes together, even ones from URLs. - - :param trust_internet: Whether to trust URL-based (#md5=...) hashes - downloaded from the internet, as by populate_link() - - """ - good_hashes = self.options.get('hashes', {}).copy() - link = self.link if trust_internet else self.original_link - if link and link.hash: - good_hashes.setdefault(link.hash_name, []).append(link.hash) - return Hashes(good_hashes) - - -def _strip_postfix(req): - """ - Strip req postfix ( -dev, 0.2, etc ) - """ - # FIXME: use package_to_requirement? - match = re.search(r'^(.*?)(?:-dev|-\d.*)$', req) - if match: - # Strip off -dev, -0.2, etc. - req = match.group(1) - return req - - -def parse_editable(editable_req, default_vcs=None): - """Parses an editable requirement into: - - a requirement name - - an URL - - extras - - editable options - Accepted requirements: - svn+http://blahblah@rev#egg=Foobar[baz]&subdirectory=version_subdir - .[some_extra] - """ - - from pip.index import Link - - url = editable_req - extras = None - - # If a file path is specified with extras, strip off the extras. 
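# A worked example (not from the deleted file) of the extras-stripping regex
# used just below on editable requirements such as '.[some_extra]':
import re

m = re.match(r'^(.+)(\[[^\]]+\])$', './mypkg[dev,test]')
print(m.group(1))  # './mypkg'     -> the path/URL part
print(m.group(2))  # '[dev,test]'  -> the extras part, parsed later via Requirement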
- m = re.match(r'^(.+)(\[[^\]]+\])$', url) - if m: - url_no_extras = m.group(1) - extras = m.group(2) - else: - url_no_extras = url - - if os.path.isdir(url_no_extras): - if not os.path.exists(os.path.join(url_no_extras, 'setup.py')): - raise InstallationError( - "Directory %r is not installable. File 'setup.py' not found." % - url_no_extras - ) - # Treating it as code that has already been checked out - url_no_extras = path_to_url(url_no_extras) - - if url_no_extras.lower().startswith('file:'): - package_name = Link(url_no_extras).egg_fragment - if extras: - return ( - package_name, - url_no_extras, - Requirement("placeholder" + extras.lower()).extras, - ) - else: - return package_name, url_no_extras, None - - for version_control in vcs: - if url.lower().startswith('%s:' % version_control): - url = '%s+%s' % (version_control, url) - break - - if '+' not in url: - if default_vcs: - warnings.warn( - "--default-vcs has been deprecated and will be removed in " - "the future.", - RemovedInPip10Warning, - ) - url = default_vcs + '+' + url - else: - raise InstallationError( - '%s should either be a path to a local project or a VCS url ' - 'beginning with svn+, git+, hg+, or bzr+' % - editable_req - ) - - vc_type = url.split('+', 1)[0].lower() - - if not vcs.get_backend(vc_type): - error_message = 'For --editable=%s only ' % editable_req + \ - ', '.join([backend.name + '+URL' for backend in vcs.backends]) + \ - ' is currently supported' - raise InstallationError(error_message) - - package_name = Link(url).egg_fragment - if not package_name: - raise InstallationError( - "Could not detect requirement name, please specify one with #egg=" - ) - if not package_name: - raise InstallationError( - '--editable=%s is not the right format; it must have ' - '#egg=Package' % editable_req - ) - return _strip_postfix(package_name), url, None diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/req/req_set.py b/classifier/myenv/lib/python3.6/site-packages/pip/req/req_set.py deleted file mode 100644 index 76aec06..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/req/req_set.py +++ /dev/null @@ -1,798 +0,0 @@ -from __future__ import absolute_import - -from collections import defaultdict -from itertools import chain -import logging -import os - -from pip._vendor import pkg_resources -from pip._vendor import requests - -from pip.compat import expanduser -from pip.download import (is_file_url, is_dir_url, is_vcs_url, url_to_path, - unpack_url) -from pip.exceptions import (InstallationError, BestVersionAlreadyInstalled, - DistributionNotFound, PreviousBuildDirError, - HashError, HashErrors, HashUnpinned, - DirectoryUrlHashUnsupported, VcsHashUnsupported, - UnsupportedPythonVersion) -from pip.req.req_install import InstallRequirement -from pip.utils import ( - display_path, dist_in_usersite, ensure_dir, normalize_path) -from pip.utils.hashes import MissingHashes -from pip.utils.logging import indent_log -from pip.utils.packaging import check_dist_requires_python -from pip.vcs import vcs -from pip.wheel import Wheel - -logger = logging.getLogger(__name__) - - -class Requirements(object): - - def __init__(self): - self._keys = [] - self._dict = {} - - def keys(self): - return self._keys - - def values(self): - return [self._dict[key] for key in self._keys] - - def __contains__(self, item): - return item in self._keys - - def __setitem__(self, key, value): - if key not in self._keys: - self._keys.append(key) - self._dict[key] = value - - def __getitem__(self, key): - return self._dict[key] - - def 
__repr__(self):
-        values = ['%s: %s' % (repr(k), repr(self[k])) for k in self.keys()]
-        return 'Requirements({%s})' % ', '.join(values)
-
-
-class DistAbstraction(object):
-    """Abstracts out the wheel vs non-wheel prepare_files logic.
-
-    The requirements for anything installable are as follows:
-     - we must be able to determine the requirement name
-       (or we can't correctly handle the non-upgrade case).
-     - we must be able to generate a list of run-time dependencies
-       without installing any additional packages (or we would
-       have to either burn time by doing temporary isolated installs
-       or alternatively violate pip's 'don't start installing unless
-       all requirements are available' rule - neither of which are
-       desirable).
-     - for packages with setup requirements, we must also be able
-       to determine their requirements without installing additional
-       packages (for the same reason as run-time dependencies)
-     - we must be able to create a Distribution object exposing the
-       above metadata.
-    """
-
-    def __init__(self, req_to_install):
-        self.req_to_install = req_to_install
-
-    def dist(self, finder):
-        """Return a setuptools Dist object."""
-        raise NotImplementedError(self.dist)
-
-    def prep_for_dist(self):
-        """Ensure that we can get a Dist for this requirement."""
-        raise NotImplementedError(self.prep_for_dist)
-
-
-def make_abstract_dist(req_to_install):
-    """Factory to make an abstract dist object.
-
-    Preconditions: either an editable req with a source_dir, or a
-    satisfied_by, or a wheel link, or a non-editable req with a source_dir.
-
-    :return: A concrete DistAbstraction.
-    """
-    if req_to_install.editable:
-        return IsSDist(req_to_install)
-    elif req_to_install.link and req_to_install.link.is_wheel:
-        return IsWheel(req_to_install)
-    else:
-        return IsSDist(req_to_install)
-
-
-class IsWheel(DistAbstraction):
-
-    def dist(self, finder):
-        return list(pkg_resources.find_distributions(
-            self.req_to_install.source_dir))[0]
-
-    def prep_for_dist(self):
-        # FIXME: https://github.com/pypa/pip/issues/1112
-        pass
-
-
-class IsSDist(DistAbstraction):
-
-    def dist(self, finder):
-        dist = self.req_to_install.get_dist()
-        # FIXME: shouldn't be globally added:
-        if dist.has_metadata('dependency_links.txt'):
-            finder.add_dependency_links(
-                dist.get_metadata_lines('dependency_links.txt')
-            )
-        return dist
-
-    def prep_for_dist(self):
-        self.req_to_install.run_egg_info()
-        self.req_to_install.assert_source_matches_version()
-
-
-class Installed(DistAbstraction):
-
-    def dist(self, finder):
-        return self.req_to_install.satisfied_by
-
-    def prep_for_dist(self):
-        pass
-
-
-class RequirementSet(object):
-
-    def __init__(self, build_dir, src_dir, download_dir, upgrade=False,
-                 upgrade_strategy=None, ignore_installed=False, as_egg=False,
-                 target_dir=None, ignore_dependencies=False,
-                 force_reinstall=False, use_user_site=False, session=None,
-                 pycompile=True, isolated=False, wheel_download_dir=None,
-                 wheel_cache=None, require_hashes=False,
-                 ignore_requires_python=False):
-        """Create a RequirementSet.
-
-        :param wheel_download_dir: Where still-packed .whl files should be
-            written to. If None they are written to the download_dir parameter.
-            Separate to download_dir to permit only keeping wheel archives for
-            pip wheel.
-        :param download_dir: Where still packed archives should be written to.
-            If None they are not saved, and are deleted immediately after
-            unpacking.
-        :param wheel_cache: The pip wheel cache, for passing to
-            InstallRequirement.
- """ - if session is None: - raise TypeError( - "RequirementSet() missing 1 required keyword argument: " - "'session'" - ) - - self.build_dir = build_dir - self.src_dir = src_dir - # XXX: download_dir and wheel_download_dir overlap semantically and may - # be combined if we're willing to have non-wheel archives present in - # the wheelhouse output by 'pip wheel'. - self.download_dir = download_dir - self.upgrade = upgrade - self.upgrade_strategy = upgrade_strategy - self.ignore_installed = ignore_installed - self.force_reinstall = force_reinstall - self.requirements = Requirements() - # Mapping of alias: real_name - self.requirement_aliases = {} - self.unnamed_requirements = [] - self.ignore_dependencies = ignore_dependencies - self.ignore_requires_python = ignore_requires_python - self.successfully_downloaded = [] - self.successfully_installed = [] - self.reqs_to_cleanup = [] - self.as_egg = as_egg - self.use_user_site = use_user_site - self.target_dir = target_dir # set from --target option - self.session = session - self.pycompile = pycompile - self.isolated = isolated - if wheel_download_dir: - wheel_download_dir = normalize_path(wheel_download_dir) - self.wheel_download_dir = wheel_download_dir - self._wheel_cache = wheel_cache - self.require_hashes = require_hashes - # Maps from install_req -> dependencies_of_install_req - self._dependencies = defaultdict(list) - - def __str__(self): - reqs = [req for req in self.requirements.values() - if not req.comes_from] - reqs.sort(key=lambda req: req.name.lower()) - return ' '.join([str(req.req) for req in reqs]) - - def __repr__(self): - reqs = [req for req in self.requirements.values()] - reqs.sort(key=lambda req: req.name.lower()) - reqs_str = ', '.join([str(req.req) for req in reqs]) - return ('<%s object; %d requirement(s): %s>' - % (self.__class__.__name__, len(reqs), reqs_str)) - - def add_requirement(self, install_req, parent_req_name=None, - extras_requested=None): - """Add install_req as a requirement to install. - - :param parent_req_name: The name of the requirement that needed this - added. The name is used because when multiple unnamed requirements - resolve to the same name, we could otherwise end up with dependency - links that point outside the Requirements set. parent_req must - already be added. Note that None implies that this is a user - supplied requirement, vs an inferred one. - :param extras_requested: an iterable of extras used to evaluate the - environement markers. - :return: Additional requirements to scan. That is either [] if - the requirement is not applicable, or [install_req] if the - requirement is applicable and has just been added. - """ - name = install_req.name - if not install_req.match_markers(extras_requested): - logger.warning("Ignoring %s: markers '%s' don't match your " - "environment", install_req.name, - install_req.markers) - return [] - - # This check has to come after we filter requirements with the - # environment markers. - if install_req.link and install_req.link.is_wheel: - wheel = Wheel(install_req.link.filename) - if not wheel.supported(): - raise InstallationError( - "%s is not a supported wheel on this platform." 
% - wheel.filename - ) - - install_req.as_egg = self.as_egg - install_req.use_user_site = self.use_user_site - install_req.target_dir = self.target_dir - install_req.pycompile = self.pycompile - install_req.is_direct = (parent_req_name is None) - - if not name: - # url or path requirement w/o an egg fragment - self.unnamed_requirements.append(install_req) - return [install_req] - else: - try: - existing_req = self.get_requirement(name) - except KeyError: - existing_req = None - if (parent_req_name is None and existing_req and not - existing_req.constraint and - existing_req.extras == install_req.extras and not - existing_req.req.specifier == install_req.req.specifier): - raise InstallationError( - 'Double requirement given: %s (already in %s, name=%r)' - % (install_req, existing_req, name)) - if not existing_req: - # Add requirement - self.requirements[name] = install_req - # FIXME: what about other normalizations? E.g., _ vs. -? - if name.lower() != name: - self.requirement_aliases[name.lower()] = name - result = [install_req] - else: - # Assume there's no need to scan, and that we've already - # encountered this for scanning. - result = [] - if not install_req.constraint and existing_req.constraint: - if (install_req.link and not (existing_req.link and - install_req.link.path == existing_req.link.path)): - self.reqs_to_cleanup.append(install_req) - raise InstallationError( - "Could not satisfy constraints for '%s': " - "installation from path or url cannot be " - "constrained to a version" % name) - # If we're now installing a constraint, mark the existing - # object for real installation. - existing_req.constraint = False - existing_req.extras = tuple( - sorted(set(existing_req.extras).union( - set(install_req.extras)))) - logger.debug("Setting %s extras to: %s", - existing_req, existing_req.extras) - # And now we need to scan this. - result = [existing_req] - # Canonicalise to the already-added object for the backref - # check below. - install_req = existing_req - if parent_req_name: - parent_req = self.get_requirement(parent_req_name) - self._dependencies[parent_req].append(install_req) - return result - - def has_requirement(self, project_name): - name = project_name.lower() - if (name in self.requirements and - not self.requirements[name].constraint or - name in self.requirement_aliases and - not self.requirements[self.requirement_aliases[name]].constraint): - return True - return False - - @property - def has_requirements(self): - return list(req for req in self.requirements.values() if not - req.constraint) or self.unnamed_requirements - - @property - def is_download(self): - if self.download_dir: - self.download_dir = expanduser(self.download_dir) - if os.path.exists(self.download_dir): - return True - else: - logger.critical('Could not find download directory') - raise InstallationError( - "Could not find or access download directory '%s'" - % display_path(self.download_dir)) - return False - - def get_requirement(self, project_name): - for name in project_name, project_name.lower(): - if name in self.requirements: - return self.requirements[name] - if name in self.requirement_aliases: - return self.requirements[self.requirement_aliases[name]] - raise KeyError("No project with the name %r" % project_name) - - def uninstall(self, auto_confirm=False): - for req in self.requirements.values(): - if req.constraint: - continue - req.uninstall(auto_confirm=auto_confirm) - req.commit_uninstall() - - def prepare_files(self, finder): - """ - Prepare process. 
Create temp directories, download and/or unpack files.
-        """
-        # make the wheelhouse
-        if self.wheel_download_dir:
-            ensure_dir(self.wheel_download_dir)
-
-        # If any top-level requirement has a hash specified, enter
-        # hash-checking mode, which requires hashes from all.
-        root_reqs = self.unnamed_requirements + self.requirements.values()
-        require_hashes = (self.require_hashes or
-                          any(req.has_hash_options for req in root_reqs))
-        if require_hashes and self.as_egg:
-            raise InstallationError(
-                '--egg is not allowed with --require-hashes mode, since it '
-                'delegates dependency resolution to setuptools and could thus '
-                'result in installation of unhashed packages.')
-
-        # Actually prepare the files, and collect any exceptions. Most hash
-        # exceptions cannot be checked ahead of time, because
-        # req.populate_link() needs to be called before we can make decisions
-        # based on link type.
-        discovered_reqs = []
-        hash_errors = HashErrors()
-        for req in chain(root_reqs, discovered_reqs):
-            try:
-                discovered_reqs.extend(self._prepare_file(
-                    finder,
-                    req,
-                    require_hashes=require_hashes,
-                    ignore_dependencies=self.ignore_dependencies))
-            except HashError as exc:
-                exc.req = req
-                hash_errors.append(exc)
-
-        if hash_errors:
-            raise hash_errors
-
-    def _is_upgrade_allowed(self, req):
-        return self.upgrade and (
-            self.upgrade_strategy == "eager" or (
-                self.upgrade_strategy == "only-if-needed" and req.is_direct
-            )
-        )
-
-    def _check_skip_installed(self, req_to_install, finder):
-        """Check if req_to_install should be skipped.
-
-        This will check if the req is installed, and whether we should upgrade
-        or reinstall it, taking into account all the relevant user options.
-
-        After calling this req_to_install will only have satisfied_by set to
-        None if the req_to_install is to be upgraded/reinstalled etc. Any
-        other value will be a dist recording the current thing installed that
-        satisfies the requirement.
-
-        Note that for vcs urls and the like we can't assess skipping in this
-        routine - we simply identify that we need to pull the thing down,
-        then later on it is pulled down and introspected to assess upgrade/
-        reinstalls etc.
-
-        :return: A text reason for why it was skipped, or None.
-        """
-        # Check whether to upgrade/reinstall this req or not.
-        req_to_install.check_if_exists()
-        if req_to_install.satisfied_by:
-            upgrade_allowed = self._is_upgrade_allowed(req_to_install)
-
-            # Is the best version already installed?
-            best_installed = False
-
-            if upgrade_allowed:
-                # For link-based requirements we have to pull the
-                # tree down and inspect to assess the version number, so
-                # it's handled further down.
-                if not (self.force_reinstall or req_to_install.link):
-                    try:
-                        finder.find_requirement(
-                            req_to_install, upgrade_allowed)
-                    except BestVersionAlreadyInstalled:
-                        best_installed = True
-                    except DistributionNotFound:
-                        # No distribution found, so we squash the
-                        # error - it will be raised later when we
-                        # re-try the install.
-                        # Why don't we just raise here?
-                        pass
-
-            if not best_installed:
-                # don't uninstall conflict if user install and
-                # conflict is not user install
-                if not (self.use_user_site and not
-                        dist_in_usersite(req_to_install.satisfied_by)):
-                    req_to_install.conflicts_with = \
-                        req_to_install.satisfied_by
-                req_to_install.satisfied_by = None
-
-            # Figure out a nice message to say why we're skipping this.
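# A condensed restatement (not part of the deleted file) of the skip-reason
# choice made in the code that follows, shown as a standalone helper:
def _skip_reason(best_installed, upgrade_strategy):
    if best_installed:
        return 'already up-to-date'
    if upgrade_strategy == 'only-if-needed':
        return 'not upgraded as not directly required'
    return 'already satisfied'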
- if best_installed: - skip_reason = 'already up-to-date' - elif self.upgrade_strategy == "only-if-needed": - skip_reason = 'not upgraded as not directly required' - else: - skip_reason = 'already satisfied' - - return skip_reason - else: - return None - - def _prepare_file(self, - finder, - req_to_install, - require_hashes=False, - ignore_dependencies=False): - """Prepare a single requirements file. - - :return: A list of additional InstallRequirements to also install. - """ - # Tell user what we are doing for this requirement: - # obtain (editable), skipping, processing (local url), collecting - # (remote url or package name) - if req_to_install.constraint or req_to_install.prepared: - return [] - - req_to_install.prepared = True - - # ###################### # - # # print log messages # # - # ###################### # - if req_to_install.editable: - logger.info('Obtaining %s', req_to_install) - else: - # satisfied_by is only evaluated by calling _check_skip_installed, - # so it must be None here. - assert req_to_install.satisfied_by is None - if not self.ignore_installed: - skip_reason = self._check_skip_installed( - req_to_install, finder) - - if req_to_install.satisfied_by: - assert skip_reason is not None, ( - '_check_skip_installed returned None but ' - 'req_to_install.satisfied_by is set to %r' - % (req_to_install.satisfied_by,)) - logger.info( - 'Requirement %s: %s', skip_reason, - req_to_install) - else: - if (req_to_install.link and - req_to_install.link.scheme == 'file'): - path = url_to_path(req_to_install.link.url) - logger.info('Processing %s', display_path(path)) - else: - logger.info('Collecting %s', req_to_install) - - with indent_log(): - # ################################ # - # # vcs update or unpack archive # # - # ################################ # - if req_to_install.editable: - if require_hashes: - raise InstallationError( - 'The editable requirement %s cannot be installed when ' - 'requiring hashes, because there is no single file to ' - 'hash.' % req_to_install) - req_to_install.ensure_has_source_dir(self.src_dir) - req_to_install.update_editable(not self.is_download) - abstract_dist = make_abstract_dist(req_to_install) - abstract_dist.prep_for_dist() - if self.is_download: - req_to_install.archive(self.download_dir) - req_to_install.check_if_exists() - elif req_to_install.satisfied_by: - if require_hashes: - logger.debug( - 'Since it is already installed, we are trusting this ' - 'package without checking its hash. To ensure a ' - 'completely repeatable environment, install into an ' - 'empty virtualenv.') - abstract_dist = Installed(req_to_install) - else: - # @@ if filesystem packages are not marked - # editable in a req, a non deterministic error - # occurs when the script attempts to unpack the - # build directory - req_to_install.ensure_has_source_dir(self.build_dir) - # If a checkout exists, it's unwise to keep going. version - # inconsistencies are logged later, but do not fail the - # installation. - # FIXME: this won't upgrade when there's an existing - # package unpacked in `req_to_install.source_dir` - if os.path.exists( - os.path.join(req_to_install.source_dir, 'setup.py')): - raise PreviousBuildDirError( - "pip can't proceed with requirements '%s' due to a" - " pre-existing build directory (%s). This is " - "likely due to a previous installation that failed" - ". pip is being responsible and not assuming it " - "can delete this. Please delete it and try again." 
- % (req_to_install, req_to_install.source_dir) - ) - req_to_install.populate_link( - finder, - self._is_upgrade_allowed(req_to_install), - require_hashes - ) - # We can't hit this spot and have populate_link return None. - # req_to_install.satisfied_by is None here (because we're - # guarded) and upgrade has no impact except when satisfied_by - # is not None. - # Then inside find_requirement existing_applicable -> False - # If no new versions are found, DistributionNotFound is raised, - # otherwise a result is guaranteed. - assert req_to_install.link - link = req_to_install.link - - # Now that we have the real link, we can tell what kind of - # requirements we have and raise some more informative errors - # than otherwise. (For example, we can raise VcsHashUnsupported - # for a VCS URL rather than HashMissing.) - if require_hashes: - # We could check these first 2 conditions inside - # unpack_url and save repetition of conditions, but then - # we would report less-useful error messages for - # unhashable requirements, complaining that there's no - # hash provided. - if is_vcs_url(link): - raise VcsHashUnsupported() - elif is_file_url(link) and is_dir_url(link): - raise DirectoryUrlHashUnsupported() - if (not req_to_install.original_link and - not req_to_install.is_pinned): - # Unpinned packages are asking for trouble when a new - # version is uploaded. This isn't a security check, but - # it saves users a surprising hash mismatch in the - # future. - # - # file:/// URLs aren't pinnable, so don't complain - # about them not being pinned. - raise HashUnpinned() - hashes = req_to_install.hashes( - trust_internet=not require_hashes) - if require_hashes and not hashes: - # Known-good hashes are missing for this requirement, so - # shim it with a facade object that will provoke hash - # computation and then raise a HashMissing exception - # showing the user what the hash should be. - hashes = MissingHashes() - - try: - download_dir = self.download_dir - # We always delete unpacked sdists after pip ran. - autodelete_unpacked = True - if req_to_install.link.is_wheel \ - and self.wheel_download_dir: - # when doing 'pip wheel` we download wheels to a - # dedicated dir. - download_dir = self.wheel_download_dir - if req_to_install.link.is_wheel: - if download_dir: - # When downloading, we only unpack wheels to get - # metadata. - autodelete_unpacked = True - else: - # When installing a wheel, we use the unpacked - # wheel. - autodelete_unpacked = False - unpack_url( - req_to_install.link, req_to_install.source_dir, - download_dir, autodelete_unpacked, - session=self.session, hashes=hashes) - except requests.HTTPError as exc: - logger.critical( - 'Could not install requirement %s because ' - 'of error %s', - req_to_install, - exc, - ) - raise InstallationError( - 'Could not install requirement %s because ' - 'of HTTP error %s for URL %s' % - (req_to_install, exc, req_to_install.link) - ) - abstract_dist = make_abstract_dist(req_to_install) - abstract_dist.prep_for_dist() - if self.is_download: - # Make a .zip of the source_dir we already created. 
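# An illustrative sketch (not from the deleted file) of the kind of known-good
# hash comparison that the Hashes object above performs during unpack_url();
# only hashlib from the standard library is used here.
import hashlib

def _matches_known_good(path, expected_sha256):
    digest = hashlib.sha256()
    with open(path, 'rb') as fp:
        for chunk in iter(lambda: fp.read(8192), b''):
            digest.update(chunk)
    return digest.hexdigest() == expected_sha256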
- if req_to_install.link.scheme in vcs.all_schemes: - req_to_install.archive(self.download_dir) - # req_to_install.req is only avail after unpack for URL - # pkgs repeat check_if_exists to uninstall-on-upgrade - # (#14) - if not self.ignore_installed: - req_to_install.check_if_exists() - if req_to_install.satisfied_by: - if self.upgrade or self.ignore_installed: - # don't uninstall conflict if user install and - # conflict is not user install - if not (self.use_user_site and not - dist_in_usersite( - req_to_install.satisfied_by)): - req_to_install.conflicts_with = \ - req_to_install.satisfied_by - req_to_install.satisfied_by = None - else: - logger.info( - 'Requirement already satisfied (use ' - '--upgrade to upgrade): %s', - req_to_install, - ) - - # ###################### # - # # parse dependencies # # - # ###################### # - dist = abstract_dist.dist(finder) - try: - check_dist_requires_python(dist) - except UnsupportedPythonVersion as e: - if self.ignore_requires_python: - logger.warning(e.args[0]) - else: - req_to_install.remove_temporary_source() - raise - more_reqs = [] - - def add_req(subreq, extras_requested): - sub_install_req = InstallRequirement( - str(subreq), - req_to_install, - isolated=self.isolated, - wheel_cache=self._wheel_cache, - ) - more_reqs.extend(self.add_requirement( - sub_install_req, req_to_install.name, - extras_requested=extras_requested)) - - # We add req_to_install before its dependencies, so that we - # can refer to it when adding dependencies. - if not self.has_requirement(req_to_install.name): - # 'unnamed' requirements will get added here - self.add_requirement(req_to_install, None) - - if not ignore_dependencies: - if (req_to_install.extras): - logger.debug( - "Installing extra requirements: %r", - ','.join(req_to_install.extras), - ) - missing_requested = sorted( - set(req_to_install.extras) - set(dist.extras) - ) - for missing in missing_requested: - logger.warning( - '%s does not provide the extra \'%s\'', - dist, missing - ) - - available_requested = sorted( - set(dist.extras) & set(req_to_install.extras) - ) - for subreq in dist.requires(available_requested): - add_req(subreq, extras_requested=available_requested) - - # cleanup tmp src - self.reqs_to_cleanup.append(req_to_install) - - if not req_to_install.editable and not req_to_install.satisfied_by: - # XXX: --no-install leads this to report 'Successfully - # downloaded' for only non-editable reqs, even though we took - # action on them. - self.successfully_downloaded.append(req_to_install) - - return more_reqs - - def cleanup_files(self): - """Clean up files, remove builds.""" - logger.debug('Cleaning up...') - with indent_log(): - for req in self.reqs_to_cleanup: - req.remove_temporary_source() - - def _to_install(self): - """Create the installation order. - - The installation order is topological - requirements are installed - before the requiring thing. We break cycles at an arbitrary point, - and make no other guarantees. - """ - # The current implementation, which we may change at any point - # installs the user specified things in the order given, except when - # dependencies must come earlier to achieve topological order. 
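# A toy illustration (not part of the deleted file) of the depth-first
# scheduling implemented below: each requirement's dependencies are emitted
# first, and the visited set breaks dependency cycles at an arbitrary point.
deps = {'app': ['lib'], 'lib': ['base'], 'base': []}
order, seen = [], set()

def schedule(name):
    if name in seen:
        return
    seen.add(name)
    for dep in deps[name]:
        schedule(dep)
    order.append(name)

schedule('app')
print(order)  # ['base', 'lib', 'app']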
- order = [] - ordered_reqs = set() - - def schedule(req): - if req.satisfied_by or req in ordered_reqs: - return - if req.constraint: - return - ordered_reqs.add(req) - for dep in self._dependencies[req]: - schedule(dep) - order.append(req) - for install_req in self.requirements.values(): - schedule(install_req) - return order - - def install(self, install_options, global_options=(), *args, **kwargs): - """ - Install everything in this set (after having downloaded and unpacked - the packages) - """ - to_install = self._to_install() - - if to_install: - logger.info( - 'Installing collected packages: %s', - ', '.join([req.name for req in to_install]), - ) - - with indent_log(): - for requirement in to_install: - if requirement.conflicts_with: - logger.info( - 'Found existing installation: %s', - requirement.conflicts_with, - ) - with indent_log(): - requirement.uninstall(auto_confirm=True) - try: - requirement.install( - install_options, - global_options, - *args, - **kwargs - ) - except: - # if install did not succeed, rollback previous uninstall - if (requirement.conflicts_with and not - requirement.install_succeeded): - requirement.rollback_uninstall() - raise - else: - if (requirement.conflicts_with and - requirement.install_succeeded): - requirement.commit_uninstall() - requirement.remove_temporary_source() - - self.successfully_installed = to_install diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/req/req_uninstall.py b/classifier/myenv/lib/python3.6/site-packages/pip/req/req_uninstall.py deleted file mode 100644 index 5248430..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/req/req_uninstall.py +++ /dev/null @@ -1,195 +0,0 @@ -from __future__ import absolute_import - -import logging -import os -import tempfile - -from pip.compat import uses_pycache, WINDOWS, cache_from_source -from pip.exceptions import UninstallationError -from pip.utils import rmtree, ask, is_local, renames, normalize_path -from pip.utils.logging import indent_log - - -logger = logging.getLogger(__name__) - - -class UninstallPathSet(object): - """A set of file paths to be removed in the uninstallation of a - requirement.""" - def __init__(self, dist): - self.paths = set() - self._refuse = set() - self.pth = {} - self.dist = dist - self.save_dir = None - self._moved_paths = [] - - def _permitted(self, path): - """ - Return True if the given path is one we are permitted to - remove/modify, False otherwise. - - """ - return is_local(path) - - def add(self, path): - head, tail = os.path.split(path) - - # we normalize the head to resolve parent directory symlinks, but not - # the tail, since we only want to uninstall symlinks, not their targets - path = os.path.join(normalize_path(head), os.path.normcase(tail)) - - if not os.path.exists(path): - return - if self._permitted(path): - self.paths.add(path) - else: - self._refuse.add(path) - - # __pycache__ files can show up after 'installed-files.txt' is created, - # due to imports - if os.path.splitext(path)[1] == '.py' and uses_pycache: - self.add(cache_from_source(path)) - - def add_pth(self, pth_file, entry): - pth_file = normalize_path(pth_file) - if self._permitted(pth_file): - if pth_file not in self.pth: - self.pth[pth_file] = UninstallPthEntries(pth_file) - self.pth[pth_file].add(entry) - else: - self._refuse.add(pth_file) - - def compact(self, paths): - """Compact a path set to contain the minimal number of paths - necessary to contain all paths in the set. 
If /a/path/ and - /a/path/to/a/file.txt are both in the set, leave only the - shorter path.""" - short_paths = set() - for path in sorted(paths, key=len): - if not any([ - (path.startswith(shortpath) and - path[len(shortpath.rstrip(os.path.sep))] == os.path.sep) - for shortpath in short_paths]): - short_paths.add(path) - return short_paths - - def _stash(self, path): - return os.path.join( - self.save_dir, os.path.splitdrive(path)[1].lstrip(os.path.sep)) - - def remove(self, auto_confirm=False): - """Remove paths in ``self.paths`` with confirmation (unless - ``auto_confirm`` is True).""" - if not self.paths: - logger.info( - "Can't uninstall '%s'. No files were found to uninstall.", - self.dist.project_name, - ) - return - logger.info( - 'Uninstalling %s-%s:', - self.dist.project_name, self.dist.version - ) - - with indent_log(): - paths = sorted(self.compact(self.paths)) - - if auto_confirm: - response = 'y' - else: - for path in paths: - logger.info(path) - response = ask('Proceed (y/n)? ', ('y', 'n')) - if self._refuse: - logger.info('Not removing or modifying (outside of prefix):') - for path in self.compact(self._refuse): - logger.info(path) - if response == 'y': - self.save_dir = tempfile.mkdtemp(suffix='-uninstall', - prefix='pip-') - for path in paths: - new_path = self._stash(path) - logger.debug('Removing file or directory %s', path) - self._moved_paths.append(path) - renames(path, new_path) - for pth in self.pth.values(): - pth.remove() - logger.info( - 'Successfully uninstalled %s-%s', - self.dist.project_name, self.dist.version - ) - - def rollback(self): - """Rollback the changes previously made by remove().""" - if self.save_dir is None: - logger.error( - "Can't roll back %s; was not uninstalled", - self.dist.project_name, - ) - return False - logger.info('Rolling back uninstall of %s', self.dist.project_name) - for path in self._moved_paths: - tmp_path = self._stash(path) - logger.debug('Replacing %s', path) - renames(tmp_path, path) - for pth in self.pth.values(): - pth.rollback() - - def commit(self): - """Remove temporary save dir: rollback will no longer be possible.""" - if self.save_dir is not None: - rmtree(self.save_dir) - self.save_dir = None - self._moved_paths = [] - - -class UninstallPthEntries(object): - def __init__(self, pth_file): - if not os.path.isfile(pth_file): - raise UninstallationError( - "Cannot remove entries from nonexistent file %s" % pth_file - ) - self.file = pth_file - self.entries = set() - self._saved_lines = None - - def add(self, entry): - entry = os.path.normcase(entry) - # On Windows, os.path.normcase converts the entry to use - # backslashes. This is correct for entries that describe absolute - # paths outside of site-packages, but all the others use forward - # slashes. 
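# A brief illustration (not from the deleted file) of the behaviour described
# above: on Windows, os.path.normcase('./pkg-1.0.egg') yields '.\\pkg-1.0.egg',
# while .pth entries for paths inside site-packages are written with forward
# slashes. splitdrive() returns an empty drive for such entries, which is how
# the check below tells them apart (ntpath is used so this runs anywhere):
import ntpath

print(ntpath.splitdrive('./pkg-1.0.egg'))   # ('', './pkg-1.0.egg')  -> rewritten
print(ntpath.splitdrive('C:\\Tools\\pkg'))  # ('C:', '\\Tools\\pkg') -> left alone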
- if WINDOWS and not os.path.splitdrive(entry)[0]: - entry = entry.replace('\\', '/') - self.entries.add(entry) - - def remove(self): - logger.debug('Removing pth entries from %s:', self.file) - with open(self.file, 'rb') as fh: - # windows uses '\r\n' with py3k, but uses '\n' with py2.x - lines = fh.readlines() - self._saved_lines = lines - if any(b'\r\n' in line for line in lines): - endline = '\r\n' - else: - endline = '\n' - for entry in self.entries: - try: - logger.debug('Removing entry: %s', entry) - lines.remove((entry + endline).encode("utf-8")) - except ValueError: - pass - with open(self.file, 'wb') as fh: - fh.writelines(lines) - - def rollback(self): - if self._saved_lines is None: - logger.error( - 'Cannot roll back changes to %s, none were made', self.file - ) - return False - logger.debug('Rolling %s back to previous state', self.file) - with open(self.file, 'wb') as fh: - fh.writelines(self._saved_lines) - return True diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/status_codes.py b/classifier/myenv/lib/python3.6/site-packages/pip/status_codes.py deleted file mode 100644 index 275360a..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/status_codes.py +++ /dev/null @@ -1,8 +0,0 @@ -from __future__ import absolute_import - -SUCCESS = 0 -ERROR = 1 -UNKNOWN_ERROR = 2 -VIRTUALENV_NOT_FOUND = 3 -PREVIOUS_BUILD_DIR_ERROR = 4 -NO_MATCHES_FOUND = 23 diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__init__.py b/classifier/myenv/lib/python3.6/site-packages/pip/utils/__init__.py deleted file mode 100644 index 0d25d91..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__init__.py +++ /dev/null @@ -1,870 +0,0 @@ -from __future__ import absolute_import - -from collections import deque -import contextlib -import errno -import io -import locale -# we have a submodule named 'logging' which would shadow this if we used the -# regular name: -import logging as std_logging -import re -import os -import posixpath -import shutil -import stat -import subprocess -import sys -import tarfile -import zipfile - -from pip.exceptions import InstallationError -from pip.compat import console_to_str, expanduser, stdlib_pkgs -from pip.locations import ( - site_packages, user_site, running_under_virtualenv, virtualenv_no_global, - write_delete_marker_file, -) -from pip._vendor import pkg_resources -from pip._vendor.six.moves import input -from pip._vendor.six import PY2 -from pip._vendor.retrying import retry - -if PY2: - from io import BytesIO as StringIO -else: - from io import StringIO - -__all__ = ['rmtree', 'display_path', 'backup_dir', - 'ask', 'splitext', - 'format_size', 'is_installable_dir', - 'is_svn_page', 'file_contents', - 'split_leading_dir', 'has_leading_dir', - 'normalize_path', - 'renames', 'get_terminal_size', 'get_prog', - 'unzip_file', 'untar_file', 'unpack_file', 'call_subprocess', - 'captured_stdout', 'ensure_dir', - 'ARCHIVE_EXTENSIONS', 'SUPPORTED_EXTENSIONS', - 'get_installed_version'] - - -logger = std_logging.getLogger(__name__) - -BZ2_EXTENSIONS = ('.tar.bz2', '.tbz') -XZ_EXTENSIONS = ('.tar.xz', '.txz', '.tlz', '.tar.lz', '.tar.lzma') -ZIP_EXTENSIONS = ('.zip', '.whl') -TAR_EXTENSIONS = ('.tar.gz', '.tgz', '.tar') -ARCHIVE_EXTENSIONS = ( - ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS) -SUPPORTED_EXTENSIONS = ZIP_EXTENSIONS + TAR_EXTENSIONS -try: - import bz2 # noqa - SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS -except ImportError: - logger.debug('bz2 module is not available') - -try: - # Only for 
Python 3.3+
-    import lzma  # noqa
-    SUPPORTED_EXTENSIONS += XZ_EXTENSIONS
-except ImportError:
-    logger.debug('lzma module is not available')
-
-
-def import_or_raise(pkg_or_module_string, ExceptionType, *args, **kwargs):
-    try:
-        return __import__(pkg_or_module_string)
-    except ImportError:
-        raise ExceptionType(*args, **kwargs)
-
-
-def ensure_dir(path):
-    """os.makedirs without raising on EEXIST (directory already exists)."""
-    try:
-        os.makedirs(path)
-    except OSError as e:
-        if e.errno != errno.EEXIST:
-            raise
-
-
-def get_prog():
-    try:
-        if os.path.basename(sys.argv[0]) in ('__main__.py', '-c'):
-            return "%s -m pip" % sys.executable
-    except (AttributeError, TypeError, IndexError):
-        pass
-    return 'pip'
-
-
-# Retry every half second for up to 3 seconds
-@retry(stop_max_delay=3000, wait_fixed=500)
-def rmtree(dir, ignore_errors=False):
-    shutil.rmtree(dir, ignore_errors=ignore_errors,
-                  onerror=rmtree_errorhandler)
-
-
-def rmtree_errorhandler(func, path, exc_info):
-    """On Windows, the files in .svn are read-only, so when rmtree() tries to
-    remove them, an exception is thrown. We catch that here, remove the
-    read-only attribute, and hopefully continue without problems."""
-    # if the file is currently read-only
-    if os.stat(path).st_mode & stat.S_IREAD:
-        # convert to read/write
-        os.chmod(path, stat.S_IWRITE)
-        # use the original function to repeat the operation
-        func(path)
-        return
-    else:
-        raise
-
-
-def display_path(path):
-    """Gives the display value for a given path, making it relative to cwd
-    if possible."""
-    path = os.path.normcase(os.path.abspath(path))
-    if sys.version_info[0] == 2:
-        path = path.decode(sys.getfilesystemencoding(), 'replace')
-        path = path.encode(sys.getdefaultencoding(), 'replace')
-    if path.startswith(os.getcwd() + os.path.sep):
-        path = '.' + path[len(os.getcwd()):]
-    return path
-
-
-def backup_dir(dir, ext='.bak'):
-    """Figure out the name of a directory to back up the given dir to
-    (adding .bak, .bak2, etc)"""
-    n = 1
-    extension = ext
-    while os.path.exists(dir + extension):
-        n += 1
-        extension = ext + str(n)
-    return dir + extension
-
-
-def ask_path_exists(message, options):
-    for action in os.environ.get('PIP_EXISTS_ACTION', '').split():
-        if action in options:
-            return action
-    return ask(message, options)
-
-
-def ask(message, options):
-    """Ask the message interactively, with the given possible responses"""
-    while 1:
-        if os.environ.get('PIP_NO_INPUT'):
-            raise Exception(
-                'No input was expected ($PIP_NO_INPUT set); question: %s' %
-                message
-            )
-        response = input(message)
-        response = response.strip().lower()
-        if response not in options:
-            print(
-                'Your response (%r) was not one of the expected responses: '
-                '%s' % (response, ', '.join(options))
-            )
-        else:
-            return response
-
-
-def format_size(bytes):
-    if bytes > 1000 * 1000:
-        return '%.1fMB' % (bytes / 1000.0 / 1000)
-    elif bytes > 10 * 1000:
-        return '%ikB' % (bytes / 1000)
-    elif bytes > 1000:
-        return '%.1fkB' % (bytes / 1000.0)
-    else:
-        return '%ibytes' % bytes
-
-
-def is_installable_dir(path):
-    """Return True if `path` is a directory containing a setup.py file."""
-    if not os.path.isdir(path):
-        return False
-    setup_py = os.path.join(path, 'setup.py')
-    if os.path.isfile(setup_py):
-        return True
-    return False
-
-
-def is_svn_page(html):
-    """
-    Returns true if the page appears to be the index page of an svn repository
-    """
-    return (re.search(r'<title>[^<]*Revision \d+:', html) and
-            re.search(r'Powered by (?:<a[^>]*?>)?Subversion', html, re.I))
-
-
-def file_contents(filename):
-    with open(filename, 'rb')
as fp: - return fp.read().decode('utf-8') - - -def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE): - """Yield pieces of data from a file-like object until EOF.""" - while True: - chunk = file.read(size) - if not chunk: - break - yield chunk - - -def split_leading_dir(path): - path = path.lstrip('/').lstrip('\\') - if '/' in path and (('\\' in path and path.find('/') < path.find('\\')) or - '\\' not in path): - return path.split('/', 1) - elif '\\' in path: - return path.split('\\', 1) - else: - return path, '' - - -def has_leading_dir(paths): - """Returns true if all the paths have the same leading path name - (i.e., everything is in one subdirectory in an archive)""" - common_prefix = None - for path in paths: - prefix, rest = split_leading_dir(path) - if not prefix: - return False - elif common_prefix is None: - common_prefix = prefix - elif prefix != common_prefix: - return False - return True - - -def normalize_path(path, resolve_symlinks=True): - """ - Convert a path to its canonical, case-normalized, absolute version. - - """ - path = expanduser(path) - if resolve_symlinks: - path = os.path.realpath(path) - else: - path = os.path.abspath(path) - return os.path.normcase(path) - - -def splitext(path): - """Like os.path.splitext, but take off .tar too""" - base, ext = posixpath.splitext(path) - if base.lower().endswith('.tar'): - ext = base[-4:] + ext - base = base[:-4] - return base, ext - - -def renames(old, new): - """Like os.renames(), but handles renaming across devices.""" - # Implementation borrowed from os.renames(). - head, tail = os.path.split(new) - if head and tail and not os.path.exists(head): - os.makedirs(head) - - shutil.move(old, new) - - head, tail = os.path.split(old) - if head and tail: - try: - os.removedirs(head) - except OSError: - pass - - -def is_local(path): - """ - Return True if this is a path pip is allowed to modify. - - If we're in a virtualenv, sys.prefix points to the virtualenv's - prefix; only sys.prefix is considered local. - - If we're not in a virtualenv, in general we can modify anything. - However, if the OS vendor has configured distutils to install - somewhere other than sys.prefix (which could be a subdirectory of - sys.prefix, e.g. /usr/local), we consider sys.prefix itself nonlocal - and the domain of the OS vendor. (In other words, everything _other - than_ sys.prefix is considered local.) - - """ - - path = normalize_path(path) - prefix = normalize_path(sys.prefix) - - if running_under_virtualenv(): - return path.startswith(normalize_path(sys.prefix)) - else: - from pip.locations import distutils_scheme - if path.startswith(prefix): - for local_path in distutils_scheme("").values(): - if path.startswith(normalize_path(local_path)): - return True - return False - else: - return True - - -def dist_is_local(dist): - """ - Return True if given Distribution object is installed somewhere pip - is allowed to modify. - - """ - return is_local(dist_location(dist)) - - -def dist_in_usersite(dist): - """ - Return True if given Distribution is installed in user site. - """ - norm_path = normalize_path(dist_location(dist)) - return norm_path.startswith(normalize_path(user_site)) - - -def dist_in_site_packages(dist): - """ - Return True if given Distribution is installed in - distutils.sysconfig.get_python_lib(). 
- """ - return normalize_path( - dist_location(dist) - ).startswith(normalize_path(site_packages)) - - -def dist_is_editable(dist): - """Is distribution an editable install?""" - for path_item in sys.path: - egg_link = os.path.join(path_item, dist.project_name + '.egg-link') - if os.path.isfile(egg_link): - return True - return False - - -def get_installed_distributions(local_only=True, - skip=stdlib_pkgs, - include_editables=True, - editables_only=False, - user_only=False): - """ - Return a list of installed Distribution objects. - - If ``local_only`` is True (default), only return installations - local to the current virtualenv, if in a virtualenv. - - ``skip`` argument is an iterable of lower-case project names to - ignore; defaults to stdlib_pkgs - - If ``editables`` is False, don't report editables. - - If ``editables_only`` is True , only report editables. - - If ``user_only`` is True , only report installations in the user - site directory. - - """ - if local_only: - local_test = dist_is_local - else: - def local_test(d): - return True - - if include_editables: - def editable_test(d): - return True - else: - def editable_test(d): - return not dist_is_editable(d) - - if editables_only: - def editables_only_test(d): - return dist_is_editable(d) - else: - def editables_only_test(d): - return True - - if user_only: - user_test = dist_in_usersite - else: - def user_test(d): - return True - - return [d for d in pkg_resources.working_set - if local_test(d) and - d.key not in skip and - editable_test(d) and - editables_only_test(d) and - user_test(d) - ] - - -def egg_link_path(dist): - """ - Return the path for the .egg-link file if it exists, otherwise, None. - - There's 3 scenarios: - 1) not in a virtualenv - try to find in site.USER_SITE, then site_packages - 2) in a no-global virtualenv - try to find in site_packages - 3) in a yes-global virtualenv - try to find in site_packages, then site.USER_SITE - (don't look in global location) - - For #1 and #3, there could be odd cases, where there's an egg-link in 2 - locations. - - This method will just return the first one found. - """ - sites = [] - if running_under_virtualenv(): - if virtualenv_no_global(): - sites.append(site_packages) - else: - sites.append(site_packages) - if user_site: - sites.append(user_site) - else: - if user_site: - sites.append(user_site) - sites.append(site_packages) - - for site in sites: - egglink = os.path.join(site, dist.project_name) + '.egg-link' - if os.path.isfile(egglink): - return egglink - - -def dist_location(dist): - """ - Get the site-packages location of this distribution. Generally - this is dist.location, except in the case of develop-installed - packages, where dist.location is the source code location, and we - want to know where the egg-link file is. 
- - """ - egg_link = egg_link_path(dist) - if egg_link: - return egg_link - return dist.location - - -def get_terminal_size(): - """Returns a tuple (x, y) representing the width(x) and the height(x) - in characters of the terminal window.""" - def ioctl_GWINSZ(fd): - try: - import fcntl - import termios - import struct - cr = struct.unpack( - 'hh', - fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234') - ) - except: - return None - if cr == (0, 0): - return None - return cr - cr = ioctl_GWINSZ(0) or ioctl_GWINSZ(1) or ioctl_GWINSZ(2) - if not cr: - try: - fd = os.open(os.ctermid(), os.O_RDONLY) - cr = ioctl_GWINSZ(fd) - os.close(fd) - except: - pass - if not cr: - cr = (os.environ.get('LINES', 25), os.environ.get('COLUMNS', 80)) - return int(cr[1]), int(cr[0]) - - -def current_umask(): - """Get the current umask which involves having to set it temporarily.""" - mask = os.umask(0) - os.umask(mask) - return mask - - -def unzip_file(filename, location, flatten=True): - """ - Unzip the file (with path `filename`) to the destination `location`. All - files are written based on system defaults and umask (i.e. permissions are - not preserved), except that regular file members with any execute - permissions (user, group, or world) have "chmod +x" applied after being - written. Note that for windows, any execute changes using os.chmod are - no-ops per the python docs. - """ - ensure_dir(location) - zipfp = open(filename, 'rb') - try: - zip = zipfile.ZipFile(zipfp, allowZip64=True) - leading = has_leading_dir(zip.namelist()) and flatten - for info in zip.infolist(): - name = info.filename - data = zip.read(name) - fn = name - if leading: - fn = split_leading_dir(name)[1] - fn = os.path.join(location, fn) - dir = os.path.dirname(fn) - if fn.endswith('/') or fn.endswith('\\'): - # A directory - ensure_dir(fn) - else: - ensure_dir(dir) - fp = open(fn, 'wb') - try: - fp.write(data) - finally: - fp.close() - mode = info.external_attr >> 16 - # if mode and regular file and any execute permissions for - # user/group/world? - if mode and stat.S_ISREG(mode) and mode & 0o111: - # make dest file have execute for user/group/world - # (chmod +x) no-op on windows per python docs - os.chmod(fn, (0o777 - current_umask() | 0o111)) - finally: - zipfp.close() - - -def untar_file(filename, location): - """ - Untar the file (with path `filename`) to the destination `location`. - All files are written based on system defaults and umask (i.e. permissions - are not preserved), except that regular file members with any execute - permissions (user, group, or world) have "chmod +x" applied after being - written. Note that for windows, any execute changes using os.chmod are - no-ops per the python docs. 
- """ - ensure_dir(location) - if filename.lower().endswith('.gz') or filename.lower().endswith('.tgz'): - mode = 'r:gz' - elif filename.lower().endswith(BZ2_EXTENSIONS): - mode = 'r:bz2' - elif filename.lower().endswith(XZ_EXTENSIONS): - mode = 'r:xz' - elif filename.lower().endswith('.tar'): - mode = 'r' - else: - logger.warning( - 'Cannot determine compression type for file %s', filename, - ) - mode = 'r:*' - tar = tarfile.open(filename, mode) - try: - # note: python<=2.5 doesn't seem to know about pax headers, filter them - leading = has_leading_dir([ - member.name for member in tar.getmembers() - if member.name != 'pax_global_header' - ]) - for member in tar.getmembers(): - fn = member.name - if fn == 'pax_global_header': - continue - if leading: - fn = split_leading_dir(fn)[1] - path = os.path.join(location, fn) - if member.isdir(): - ensure_dir(path) - elif member.issym(): - try: - tar._extract_member(member, path) - except Exception as exc: - # Some corrupt tar files seem to produce this - # (specifically bad symlinks) - logger.warning( - 'In the tar file %s the member %s is invalid: %s', - filename, member.name, exc, - ) - continue - else: - try: - fp = tar.extractfile(member) - except (KeyError, AttributeError) as exc: - # Some corrupt tar files seem to produce this - # (specifically bad symlinks) - logger.warning( - 'In the tar file %s the member %s is invalid: %s', - filename, member.name, exc, - ) - continue - ensure_dir(os.path.dirname(path)) - with open(path, 'wb') as destfp: - shutil.copyfileobj(fp, destfp) - fp.close() - # Update the timestamp (useful for cython compiled files) - tar.utime(member, path) - # member have any execute permissions for user/group/world? - if member.mode & 0o111: - # make dest file have execute for user/group/world - # no-op on windows per python docs - os.chmod(path, (0o777 - current_umask() | 0o111)) - finally: - tar.close() - - -def unpack_file(filename, location, content_type, link): - filename = os.path.realpath(filename) - if (content_type == 'application/zip' or - filename.lower().endswith(ZIP_EXTENSIONS) or - zipfile.is_zipfile(filename)): - unzip_file( - filename, - location, - flatten=not filename.endswith('.whl') - ) - elif (content_type == 'application/x-gzip' or - tarfile.is_tarfile(filename) or - filename.lower().endswith( - TAR_EXTENSIONS + BZ2_EXTENSIONS + XZ_EXTENSIONS)): - untar_file(filename, location) - elif (content_type and content_type.startswith('text/html') and - is_svn_page(file_contents(filename))): - # We don't really care about this - from pip.vcs.subversion import Subversion - Subversion('svn+' + link.url).unpack(location) - else: - # FIXME: handle? - # FIXME: magic signatures? - logger.critical( - 'Cannot unpack file %s (downloaded from %s, content-type: %s); ' - 'cannot detect archive format', - filename, location, content_type, - ) - raise InstallationError( - 'Cannot determine archive format of %s' % location - ) - - -def call_subprocess(cmd, show_stdout=True, cwd=None, - on_returncode='raise', - command_desc=None, - extra_environ=None, spinner=None): - # This function's handling of subprocess output is confusing and I - # previously broke it terribly, so as penance I will write a long comment - # explaining things. - # - # The obvious thing that affects output is the show_stdout= - # kwarg. show_stdout=True means, let the subprocess write directly to our - # stdout. 
Even though it is nominally the default, it is almost never used - # inside pip (and should not be used in new code without a very good - # reason); as of 2016-02-22 it is only used in a few places inside the VCS - # wrapper code. Ideally we should get rid of it entirely, because it - # creates a lot of complexity here for a rarely used feature. - # - # Most places in pip set show_stdout=False. What this means is: - # - We connect the child stdout to a pipe, which we read. - # - By default, we hide the output but show a spinner -- unless the - # subprocess exits with an error, in which case we show the output. - # - If the --verbose option was passed (= loglevel is DEBUG), then we show - # the output unconditionally. (But in this case we don't want to show - # the output a second time if it turns out that there was an error.) - # - # stderr is always merged with stdout (even if show_stdout=True). - if show_stdout: - stdout = None - else: - stdout = subprocess.PIPE - if command_desc is None: - cmd_parts = [] - for part in cmd: - if ' ' in part or '\n' in part or '"' in part or "'" in part: - part = '"%s"' % part.replace('"', '\\"') - cmd_parts.append(part) - command_desc = ' '.join(cmd_parts) - logger.debug("Running command %s", command_desc) - env = os.environ.copy() - if extra_environ: - env.update(extra_environ) - try: - proc = subprocess.Popen( - cmd, stderr=subprocess.STDOUT, stdin=None, stdout=stdout, - cwd=cwd, env=env) - except Exception as exc: - logger.critical( - "Error %s while executing command %s", exc, command_desc, - ) - raise - if stdout is not None: - all_output = [] - while True: - line = console_to_str(proc.stdout.readline()) - if not line: - break - line = line.rstrip() - all_output.append(line + '\n') - if logger.getEffectiveLevel() <= std_logging.DEBUG: - # Show the line immediately - logger.debug(line) - else: - # Update the spinner - if spinner is not None: - spinner.spin() - proc.wait() - if spinner is not None: - if proc.returncode: - spinner.finish("error") - else: - spinner.finish("done") - if proc.returncode: - if on_returncode == 'raise': - if (logger.getEffectiveLevel() > std_logging.DEBUG and - not show_stdout): - logger.info( - 'Complete output from command %s:', command_desc, - ) - logger.info( - ''.join(all_output) + - '\n----------------------------------------' - ) - raise InstallationError( - 'Command "%s" failed with error code %s in %s' - % (command_desc, proc.returncode, cwd)) - elif on_returncode == 'warn': - logger.warning( - 'Command "%s" had error code %s in %s', - command_desc, proc.returncode, cwd, - ) - elif on_returncode == 'ignore': - pass - else: - raise ValueError('Invalid value: on_returncode=%s' % - repr(on_returncode)) - if not show_stdout: - return ''.join(all_output) - - -def read_text_file(filename): - """Return the contents of *filename*. - - Try to decode the file contents with utf-8, the preferred system encoding - (e.g., cp1252 on some Windows machines), and latin1, in that order. - Decoding a byte string with latin1 will never raise an error. In the worst - case, the returned string will contain some garbage characters. - - """ - with open(filename, 'rb') as fp: - data = fp.read() - - encodings = ['utf-8', locale.getpreferredencoding(False), 'latin1'] - for enc in encodings: - try: - data = data.decode(enc) - except UnicodeDecodeError: - continue - break - - assert type(data) != bytes # Latin1 should have worked. 
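
The fallback decoding in `read_text_file` above always terminates because latin-1 maps every byte to a code point. A short demonstration with the locale step omitted:

data = b'caf\xe9'  # latin-1 encoded "café"; invalid as UTF-8
for enc in ('utf-8', 'latin1'):
    try:
        text = data.decode(enc)
        break
    except UnicodeDecodeError:
        continue
assert text == 'café'
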
- return data - - -def _make_build_dir(build_dir): - os.makedirs(build_dir) - write_delete_marker_file(build_dir) - - -class FakeFile(object): - """Wrap a list of lines in an object with readline() to make - ConfigParser happy.""" - def __init__(self, lines): - self._gen = (l for l in lines) - - def readline(self): - try: - try: - return next(self._gen) - except NameError: - return self._gen.next() - except StopIteration: - return '' - - def __iter__(self): - return self._gen - - -class StreamWrapper(StringIO): - - @classmethod - def from_stream(cls, orig_stream): - cls.orig_stream = orig_stream - return cls() - - # compileall.compile_dir() needs stdout.encoding to print to stdout - @property - def encoding(self): - return self.orig_stream.encoding - - -@contextlib.contextmanager -def captured_output(stream_name): - """Return a context manager used by captured_stdout/stdin/stderr - that temporarily replaces the sys stream *stream_name* with a StringIO. - - Taken from Lib/support/__init__.py in the CPython repo. - """ - orig_stdout = getattr(sys, stream_name) - setattr(sys, stream_name, StreamWrapper.from_stream(orig_stdout)) - try: - yield getattr(sys, stream_name) - finally: - setattr(sys, stream_name, orig_stdout) - - -def captured_stdout(): - """Capture the output of sys.stdout: - - with captured_stdout() as stdout: - print('hello') - self.assertEqual(stdout.getvalue(), 'hello\n') - - Taken from Lib/support/__init__.py in the CPython repo. - """ - return captured_output('stdout') - - -class cached_property(object): - """A property that is only computed once per instance and then replaces - itself with an ordinary attribute. Deleting the attribute resets the - property. - - Source: https://github.com/bottlepy/bottle/blob/0.11.5/bottle.py#L175 - """ - - def __init__(self, func): - self.__doc__ = getattr(func, '__doc__') - self.func = func - - def __get__(self, obj, cls): - if obj is None: - # We're being accessed from the class itself, not from an object - return self - value = obj.__dict__[self.func.__name__] = self.func(obj) - return value - - -def get_installed_version(dist_name, lookup_dirs=None): - """Get the installed version of dist_name avoiding pkg_resources cache""" - # Create a requirement that we'll look for inside of setuptools. - req = pkg_resources.Requirement.parse(dist_name) - - # We want to avoid having this cached, so we need to construct a new - # working set each time. - if lookup_dirs is None: - working_set = pkg_resources.WorkingSet() - else: - working_set = pkg_resources.WorkingSet(lookup_dirs) - - # Get the installed distribution from our working set - dist = working_set.find(req) - - # Check to see if we got an installed distribution or not, if we did - # we want to return it's version. 
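
`cached_property` works because it is a non-data descriptor: the first access stores the result in the instance `__dict__`, which then shadows the descriptor. A runnable demo, with the class restated from the definition above:

class cached_property(object):
    def __init__(self, func):
        self.__doc__ = getattr(func, '__doc__')
        self.func = func

    def __get__(self, obj, cls):
        if obj is None:
            return self
        value = obj.__dict__[self.func.__name__] = self.func(obj)
        return value

class Report(object):
    @cached_property
    def total(self):
        print('computing once')
        return 40 + 2

r = Report()
assert r.total == 42     # prints "computing once"
assert r.total == 42     # served from r.__dict__; no recomputation
del r.__dict__['total']  # deleting the attribute resets the property
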
- return dist.version if dist else None - - -def consume(iterator): - """Consume an iterable at C speed.""" - deque(iterator, maxlen=0) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index 92db3ae..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/appdirs.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/appdirs.cpython-36.pyc deleted file mode 100644 index 27493d7..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/appdirs.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/build.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/build.cpython-36.pyc deleted file mode 100644 index 6c60306..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/build.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/deprecation.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/deprecation.cpython-36.pyc deleted file mode 100644 index bab5afc..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/deprecation.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/encoding.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/encoding.cpython-36.pyc deleted file mode 100644 index 04868cc..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/encoding.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/filesystem.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/filesystem.cpython-36.pyc deleted file mode 100644 index 81f3498..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/filesystem.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/glibc.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/glibc.cpython-36.pyc deleted file mode 100644 index db9e0ea..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/glibc.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/hashes.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/hashes.cpython-36.pyc deleted file mode 100644 index c4e2d03..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/hashes.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/logging.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/logging.cpython-36.pyc deleted file mode 100644 index 1c73a7b..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/logging.cpython-36.pyc and /dev/null differ diff --git 
a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/outdated.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/outdated.cpython-36.pyc deleted file mode 100644 index 8af2ba8..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/outdated.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/packaging.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/packaging.cpython-36.pyc deleted file mode 100644 index 17655eb..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/packaging.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/setuptools_build.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/setuptools_build.cpython-36.pyc deleted file mode 100644 index b1eda99..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/setuptools_build.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/ui.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/ui.cpython-36.pyc deleted file mode 100644 index 31efc50..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/utils/__pycache__/ui.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/appdirs.py b/classifier/myenv/lib/python3.6/site-packages/pip/utils/appdirs.py deleted file mode 100644 index 9b82801..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/utils/appdirs.py +++ /dev/null @@ -1,248 +0,0 @@ -""" -This code was taken from https://github.com/ActiveState/appdirs and modified -to suit our purposes. -""" -from __future__ import absolute_import - -import os -import sys - -from pip.compat import WINDOWS, expanduser -from pip._vendor.six import PY2, text_type - - -def user_cache_dir(appname): - r""" - Return full path to the user-specific cache dir for this application. - - "appname" is the name of application. - - Typical user cache directories are: - macOS: ~/Library/Caches/<AppName> - Unix: ~/.cache/<AppName> (XDG default) - Windows: C:\Users\<username>\AppData\Local\<AppName>\Cache - - On Windows the only suggestion in the MSDN docs is that local settings go - in the `CSIDL_LOCAL_APPDATA` directory. This is identical to the - non-roaming app data dir (the default returned by `user_data_dir`). Apps - typically put cache data somewhere *under* the given dir here. Some - examples: - ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache - ...\Acme\SuperApp\Cache\1.0 - - OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. - """ - if WINDOWS: - # Get the base path - path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) - - # When using Python 2, return paths as bytes on Windows like we do on - # other operating systems. See helper function docs for more details. 
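
On Linux, `user_cache_dir` above reduces to the XDG lookup. A minimal sketch of just that branch:

import os
from os.path import expanduser

def user_cache_dir_unix(appname):
    # XDG default: ~/.cache/<AppName>, overridable via XDG_CACHE_HOME.
    return os.path.join(os.getenv('XDG_CACHE_HOME', expanduser('~/.cache')),
                        appname)

print(user_cache_dir_unix('pip'))  # e.g. /home/user/.cache/pip
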
- if PY2 and isinstance(path, text_type): - path = _win_path_to_bytes(path) - - # Add our app name and Cache directory to it - path = os.path.join(path, appname, "Cache") - elif sys.platform == "darwin": - # Get the base path - path = expanduser("~/Library/Caches") - - # Add our app name to it - path = os.path.join(path, appname) - else: - # Get the base path - path = os.getenv("XDG_CACHE_HOME", expanduser("~/.cache")) - - # Add our app name to it - path = os.path.join(path, appname) - - return path - - -def user_data_dir(appname, roaming=False): - """ - Return full path to the user-specific data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> - for a discussion of issues. - - Typical user data directories are: - macOS: ~/Library/Application Support/<AppName> - Unix: ~/.local/share/<AppName> # or in - $XDG_DATA_HOME, if defined - Win XP (not roaming): C:\Documents and Settings\<username>\ ... - ...Application Data\<AppName> - Win XP (roaming): C:\Documents and Settings\<username>\Local ... - ...Settings\Application Data\<AppName> - Win 7 (not roaming): C:\\Users\<username>\AppData\Local\<AppName> - Win 7 (roaming): C:\\Users\<username>\AppData\Roaming\<AppName> - - For Unix, we follow the XDG spec and support $XDG_DATA_HOME. - That means, by default "~/.local/share/<AppName>". - """ - if WINDOWS: - const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" - path = os.path.join(os.path.normpath(_get_win_folder(const)), appname) - elif sys.platform == "darwin": - path = os.path.join( - expanduser('~/Library/Application Support/'), - appname, - ) - else: - path = os.path.join( - os.getenv('XDG_DATA_HOME', expanduser("~/.local/share")), - appname, - ) - - return path - - -def user_config_dir(appname, roaming=True): - """Return full path to the user-specific config dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "roaming" (boolean, default True) can be set False to not use the - Windows roaming appdata directory. That means that for users on a - Windows network setup for roaming profiles, this user data will be - sync'd on login. See - <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> - for a discussion of issues. - - Typical user data directories are: - macOS: same as user_data_dir - Unix: ~/.config/<AppName> - Win *: same as user_data_dir - - For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. - That means, by default "~/.config/<AppName>". - """ - if WINDOWS: - path = user_data_dir(appname, roaming=roaming) - elif sys.platform == "darwin": - path = user_data_dir(appname) - else: - path = os.getenv('XDG_CONFIG_HOME', expanduser("~/.config")) - path = os.path.join(path, appname) - - return path - - -# for the discussion regarding site_config_dirs locations -# see <https://github.com/pypa/pip/issues/1733> -def site_config_dirs(appname): - """Return a list of potential user-shared config dirs for this application. - - "appname" is the name of application. 
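
The `roaming and A or B` expression in `user_data_dir` above is the pre-ternary Python 2 conditional idiom; it is safe here only because the first alternative, "CSIDL_APPDATA", is always truthy. A quick equivalence check:

roaming = False
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
assert const == "CSIDL_LOCAL_APPDATA"

# Modern equivalent:
const = "CSIDL_APPDATA" if roaming else "CSIDL_LOCAL_APPDATA"
assert const == "CSIDL_LOCAL_APPDATA"
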
- - Typical user config directories are: - macOS: /Library/Application Support/<AppName>/ - Unix: /etc or $XDG_CONFIG_DIRS[i]/<AppName>/ for each value in - $XDG_CONFIG_DIRS - Win XP: C:\Documents and Settings\All Users\Application ... - ...Data\<AppName>\ - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory - on Vista.) - Win 7: Hidden, but writeable on Win 7: - C:\ProgramData\<AppName>\ - """ - if WINDOWS: - path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) - pathlist = [os.path.join(path, appname)] - elif sys.platform == 'darwin': - pathlist = [os.path.join('/Library/Application Support', appname)] - else: - # try looking in $XDG_CONFIG_DIRS - xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') - if xdg_config_dirs: - pathlist = [ - os.path.join(expanduser(x), appname) - for x in xdg_config_dirs.split(os.pathsep) - ] - else: - pathlist = [] - - # always look in /etc directly as well - pathlist.append('/etc') - - return pathlist - - -# -- Windows support functions -- - -def _get_win_folder_from_registry(csidl_name): - """ - This is a fallback technique at best. I'm not sure if using the - registry for this guarantees us the correct answer for all CSIDL_* - names. - """ - import _winreg - - shell_folder_name = { - "CSIDL_APPDATA": "AppData", - "CSIDL_COMMON_APPDATA": "Common AppData", - "CSIDL_LOCAL_APPDATA": "Local AppData", - }[csidl_name] - - key = _winreg.OpenKey( - _winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" - ) - directory, _type = _winreg.QueryValueEx(key, shell_folder_name) - return directory - - -def _get_win_folder_with_ctypes(csidl_name): - csidl_const = { - "CSIDL_APPDATA": 26, - "CSIDL_COMMON_APPDATA": 35, - "CSIDL_LOCAL_APPDATA": 28, - }[csidl_name] - - buf = ctypes.create_unicode_buffer(1024) - ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) - - # Downgrade to short path name if have highbit chars. See - # <http://bugs.activestate.com/show_bug.cgi?id=85099>. - has_high_char = False - for c in buf: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - buf2 = ctypes.create_unicode_buffer(1024) - if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024): - buf = buf2 - - return buf.value - -if WINDOWS: - try: - import ctypes - _get_win_folder = _get_win_folder_with_ctypes - except ImportError: - _get_win_folder = _get_win_folder_from_registry - - -def _win_path_to_bytes(path): - """Encode Windows paths to bytes. Only used on Python 2. - - Motivation is to be consistent with other operating systems where paths - are also returned as bytes. This avoids problems mixing bytes and Unicode - elsewhere in the codebase. For more details and discussion see - <https://github.com/pypa/pip/issues/3463>. - - If encoding using ASCII and MBCS fails, return the original Unicode path. 
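
The Unix branch of `site_config_dirs` above splits `$XDG_CONFIG_DIRS` on the path separator and always appends `/etc`. In standalone form:

import os
from os.path import expanduser

xdg_config_dirs = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
pathlist = [os.path.join(expanduser(x), 'pip')
            for x in xdg_config_dirs.split(os.pathsep)]
pathlist.append('/etc')
print(pathlist)  # e.g. ['/etc/xdg/pip', '/etc']
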
- """ - for encoding in ('ASCII', 'MBCS'): - try: - return path.encode(encoding) - except (UnicodeEncodeError, LookupError): - pass - return path diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/build.py b/classifier/myenv/lib/python3.6/site-packages/pip/utils/build.py deleted file mode 100644 index fc65cfa..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/utils/build.py +++ /dev/null @@ -1,42 +0,0 @@ -from __future__ import absolute_import - -import os.path -import tempfile - -from pip.utils import rmtree - - -class BuildDirectory(object): - - def __init__(self, name=None, delete=None): - # If we were not given an explicit directory, and we were not given an - # explicit delete option, then we'll default to deleting. - if name is None and delete is None: - delete = True - - if name is None: - # We realpath here because some systems have their default tmpdir - # symlinked to another directory. This tends to confuse build - # scripts, so we canonicalize the path by traversing potential - # symlinks here. - name = os.path.realpath(tempfile.mkdtemp(prefix="pip-build-")) - # If we were not given an explicit directory, and we were not given - # an explicit delete option, then we'll default to deleting. - if delete is None: - delete = True - - self.name = name - self.delete = delete - - def __repr__(self): - return "<{} {!r}>".format(self.__class__.__name__, self.name) - - def __enter__(self): - return self.name - - def __exit__(self, exc, value, tb): - self.cleanup() - - def cleanup(self): - if self.delete: - rmtree(self.name) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/deprecation.py b/classifier/myenv/lib/python3.6/site-packages/pip/utils/deprecation.py deleted file mode 100644 index c3f799e..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/utils/deprecation.py +++ /dev/null @@ -1,76 +0,0 @@ -""" -A module that implements tooling to enable easy warnings about deprecations. -""" -from __future__ import absolute_import - -import logging -import warnings - - -class PipDeprecationWarning(Warning): - pass - - -class Pending(object): - pass - - -class RemovedInPip10Warning(PipDeprecationWarning): - pass - - -class RemovedInPip11Warning(PipDeprecationWarning, Pending): - pass - - -class Python26DeprecationWarning(PipDeprecationWarning): - pass - - -# Warnings <-> Logging Integration - - -_warnings_showwarning = None - - -def _showwarning(message, category, filename, lineno, file=None, line=None): - if file is not None: - if _warnings_showwarning is not None: - _warnings_showwarning( - message, category, filename, lineno, file, line, - ) - else: - if issubclass(category, PipDeprecationWarning): - # We use a specially named logger which will handle all of the - # deprecation messages for pip. - logger = logging.getLogger("pip.deprecations") - - # This is purposely using the % formatter here instead of letting - # the logging module handle the interpolation. This is because we - # want it to appear as if someone typed this entire message out. - log_message = "DEPRECATION: %s" % message - - # PipDeprecationWarnings that are Pending still have at least 2 - # versions to go until they are removed so they can just be - # warnings. Otherwise, they will be removed in the very next - # version of pip. We want these to be more obvious so we use the - # ERROR logging level. 
- if issubclass(category, Pending): - logger.warning(log_message) - else: - logger.error(log_message) - else: - _warnings_showwarning( - message, category, filename, lineno, file, line, - ) - - -def install_warning_logger(): - # Enable our Deprecation Warnings - warnings.simplefilter("default", PipDeprecationWarning, append=True) - - global _warnings_showwarning - - if _warnings_showwarning is None: - _warnings_showwarning = warnings.showwarning - warnings.showwarning = _showwarning diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/encoding.py b/classifier/myenv/lib/python3.6/site-packages/pip/utils/encoding.py deleted file mode 100644 index 2483168..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/utils/encoding.py +++ /dev/null @@ -1,31 +0,0 @@ -import codecs -import locale -import re - - -BOMS = [ - (codecs.BOM_UTF8, 'utf8'), - (codecs.BOM_UTF16, 'utf16'), - (codecs.BOM_UTF16_BE, 'utf16-be'), - (codecs.BOM_UTF16_LE, 'utf16-le'), - (codecs.BOM_UTF32, 'utf32'), - (codecs.BOM_UTF32_BE, 'utf32-be'), - (codecs.BOM_UTF32_LE, 'utf32-le'), -] - -ENCODING_RE = re.compile(b'coding[:=]\s*([-\w.]+)') - - -def auto_decode(data): - """Check a bytes string for a BOM to correctly detect the encoding - - Fallback to locale.getpreferredencoding(False) like open() on Python3""" - for bom, encoding in BOMS: - if data.startswith(bom): - return data[len(bom):].decode(encoding) - # Lets check the first two lines as in PEP263 - for line in data.split(b'\n')[:2]: - if line[0:1] == b'#' and ENCODING_RE.search(line): - encoding = ENCODING_RE.search(line).groups()[0].decode('ascii') - return data.decode(encoding) - return data.decode(locale.getpreferredencoding(False)) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/filesystem.py b/classifier/myenv/lib/python3.6/site-packages/pip/utils/filesystem.py deleted file mode 100644 index 25ad516..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/utils/filesystem.py +++ /dev/null @@ -1,28 +0,0 @@ -import os -import os.path - -from pip.compat import get_path_uid - - -def check_path_owner(path): - # If we don't have a way to check the effective uid of this process, then - # we'll just assume that we own the directory. - if not hasattr(os, "geteuid"): - return True - - previous = None - while path != previous: - if os.path.lexists(path): - # Check if path is writable by current user. - if os.geteuid() == 0: - # Special handling for root user in order to handle properly - # cases where users use sudo without -H flag. - try: - path_uid = get_path_uid(path) - except OSError: - return False - return path_uid == 0 - else: - return os.access(path, os.W_OK) - else: - previous, path = path, os.path.dirname(path) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/glibc.py b/classifier/myenv/lib/python3.6/site-packages/pip/utils/glibc.py deleted file mode 100644 index 7847885..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/utils/glibc.py +++ /dev/null @@ -1,81 +0,0 @@ -from __future__ import absolute_import - -import re -import ctypes -import platform -import warnings - - -def glibc_version_string(): - "Returns glibc version string, or None if not using glibc." - - # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen - # manpage says, "If filename is NULL, then the returned handle is for the - # main program". This way we can let the linker do the work to figure out - # which libc our process is actually using. 
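
The BOM handling that `auto_decode` above leads with is easy to verify on its own; a sketch of just the BOM branch:

import codecs

data = codecs.BOM_UTF8 + 'héllo'.encode('utf-8')
text = None
for bom, enc in [(codecs.BOM_UTF8, 'utf8'), (codecs.BOM_UTF16, 'utf16')]:
    if data.startswith(bom):
        text = data[len(bom):].decode(enc)
        break
assert text == 'héllo'
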
- process_namespace = ctypes.CDLL(None) - try: - gnu_get_libc_version = process_namespace.gnu_get_libc_version - except AttributeError: - # Symbol doesn't exist -> therefore, we are not linked to - # glibc. - return None - - # Call gnu_get_libc_version, which returns a string like "2.5" - gnu_get_libc_version.restype = ctypes.c_char_p - version_str = gnu_get_libc_version() - # py2 / py3 compatibility: - if not isinstance(version_str, str): - version_str = version_str.decode("ascii") - - return version_str - - -# Separated out from have_compatible_glibc for easier unit testing -def check_glibc_version(version_str, required_major, minimum_minor): - # Parse string and check against requested version. - # - # We use a regexp instead of str.split because we want to discard any - # random junk that might come after the minor version -- this might happen - # in patched/forked versions of glibc (e.g. Linaro's version of glibc - # uses version strings like "2.20-2014.11"). See gh-3588. - m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str) - if not m: - warnings.warn("Expected glibc version with 2 components major.minor," - " got: %s" % version_str, RuntimeWarning) - return False - return (int(m.group("major")) == required_major and - int(m.group("minor")) >= minimum_minor) - - -def have_compatible_glibc(required_major, minimum_minor): - version_str = glibc_version_string() - if version_str is None: - return False - return check_glibc_version(version_str, required_major, minimum_minor) - - -# platform.libc_ver regularly returns completely nonsensical glibc -# versions. E.g. on my computer, platform says: -# -# ~$ python2.7 -c 'import platform; print(platform.libc_ver())' -# ('glibc', '2.7') -# ~$ python3.5 -c 'import platform; print(platform.libc_ver())' -# ('glibc', '2.9') -# -# But the truth is: -# -# ~$ ldd --version -# ldd (Debian GLIBC 2.22-11) 2.22 -# -# This is unfortunate, because it means that the linehaul data on libc -# versions that was generated by pip 8.1.2 and earlier is useless and -# misleading. Solution: instead of using platform, use our code that actually -# works. -def libc_ver(): - glibc_version = glibc_version_string() - if glibc_version is None: - # For non-glibc platforms, fall back on platform.libc_ver - return platform.libc_ver() - else: - return ("glibc", glibc_version) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/hashes.py b/classifier/myenv/lib/python3.6/site-packages/pip/utils/hashes.py deleted file mode 100644 index 9602970..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/utils/hashes.py +++ /dev/null @@ -1,92 +0,0 @@ -from __future__ import absolute_import - -import hashlib - -from pip.exceptions import HashMismatch, HashMissing, InstallationError -from pip.utils import read_chunks -from pip._vendor.six import iteritems, iterkeys, itervalues - - -# The recommended hash algo of the moment. Change this whenever the state of -# the art changes; it won't hurt backward compatibility. -FAVORITE_HASH = 'sha256' - - -# Names of hashlib algorithms allowed by the --hash option and ``pip hash`` -# Currently, those are the ones at least as collision-resistant as sha256. 
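
The regex in `check_glibc_version` deliberately keeps only major.minor and discards vendor suffixes. For example:

import re

def parse_glibc(version_str):
    # Same pattern as above: take major.minor, ignore trailing junk.
    m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str)
    if not m:
        return None
    return int(m.group('major')), int(m.group('minor'))

assert parse_glibc('2.20-2014.11') == (2, 20)  # Linaro-style version string
assert parse_glibc('2.22') == (2, 22)
assert parse_glibc('garbage') is None
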
-STRONG_HASHES = ['sha256', 'sha384', 'sha512'] - - -class Hashes(object): - """A wrapper that builds multiple hashes at once and checks them against - known-good values - - """ - def __init__(self, hashes=None): - """ - :param hashes: A dict of algorithm names pointing to lists of allowed - hex digests - """ - self._allowed = {} if hashes is None else hashes - - def check_against_chunks(self, chunks): - """Check good hashes against ones built from iterable of chunks of - data. - - Raise HashMismatch if none match. - - """ - gots = {} - for hash_name in iterkeys(self._allowed): - try: - gots[hash_name] = hashlib.new(hash_name) - except (ValueError, TypeError): - raise InstallationError('Unknown hash name: %s' % hash_name) - - for chunk in chunks: - for hash in itervalues(gots): - hash.update(chunk) - - for hash_name, got in iteritems(gots): - if got.hexdigest() in self._allowed[hash_name]: - return - self._raise(gots) - - def _raise(self, gots): - raise HashMismatch(self._allowed, gots) - - def check_against_file(self, file): - """Check good hashes against a file-like object - - Raise HashMismatch if none match. - - """ - return self.check_against_chunks(read_chunks(file)) - - def check_against_path(self, path): - with open(path, 'rb') as file: - return self.check_against_file(file) - - def __nonzero__(self): - """Return whether I know any known-good hashes.""" - return bool(self._allowed) - - def __bool__(self): - return self.__nonzero__() - - -class MissingHashes(Hashes): - """A workalike for Hashes used when we're missing a hash for a requirement - - It computes the actual hash of the requirement and raises a HashMissing - exception showing it to the user. - - """ - def __init__(self): - """Don't offer the ``hashes`` kwarg.""" - # Pass our favorite hash in to generate a "gotten hash". With the - # empty list, it will never match, so an error will always raise. - super(MissingHashes, self).__init__(hashes={FAVORITE_HASH: []}) - - def _raise(self, gots): - raise HashMissing(gots[FAVORITE_HASH].hexdigest()) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/logging.py b/classifier/myenv/lib/python3.6/site-packages/pip/utils/logging.py deleted file mode 100644 index 1c1053a..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/utils/logging.py +++ /dev/null @@ -1,130 +0,0 @@ -from __future__ import absolute_import - -import contextlib -import logging -import logging.handlers -import os - -try: - import threading -except ImportError: - import dummy_threading as threading - -from pip.compat import WINDOWS -from pip.utils import ensure_dir - -try: - from pip._vendor import colorama -# Lots of different errors can come from this, including SystemError and -# ImportError. -except Exception: - colorama = None - - -_log_state = threading.local() -_log_state.indentation = 0 - - -@contextlib.contextmanager -def indent_log(num=2): - """ - A context manager which will cause the log output to be indented for any - log messages emitted inside it. - """ - _log_state.indentation += num - try: - yield - finally: - _log_state.indentation -= num - - -def get_indentation(): - return getattr(_log_state, 'indentation', 0) - - -class IndentingFormatter(logging.Formatter): - - def format(self, record): - """ - Calls the standard formatter, but will indent all of the log messages - by our current indentation level. 
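
What `Hashes.check_against_chunks` does reduces to feeding every chunk into one hashlib object per allowed algorithm and comparing hex digests at the end. A minimal standalone version:

import hashlib

allowed = {'sha256': [hashlib.sha256(b'hello world').hexdigest()]}

gots = {name: hashlib.new(name) for name in allowed}
for chunk in (b'hello ', b'world'):
    for h in gots.values():
        h.update(chunk)

# At least one computed digest must appear in the allowed list.
assert any(got.hexdigest() in allowed[name] for name, got in gots.items())
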
- """ - formatted = logging.Formatter.format(self, record) - formatted = "".join([ - (" " * get_indentation()) + line - for line in formatted.splitlines(True) - ]) - return formatted - - -def _color_wrap(*colors): - def wrapped(inp): - return "".join(list(colors) + [inp, colorama.Style.RESET_ALL]) - return wrapped - - -class ColorizedStreamHandler(logging.StreamHandler): - - # Don't build up a list of colors if we don't have colorama - if colorama: - COLORS = [ - # This needs to be in order from highest logging level to lowest. - (logging.ERROR, _color_wrap(colorama.Fore.RED)), - (logging.WARNING, _color_wrap(colorama.Fore.YELLOW)), - ] - else: - COLORS = [] - - def __init__(self, stream=None): - logging.StreamHandler.__init__(self, stream) - - if WINDOWS and colorama: - self.stream = colorama.AnsiToWin32(self.stream) - - def should_color(self): - # Don't colorize things if we do not have colorama - if not colorama: - return False - - real_stream = ( - self.stream if not isinstance(self.stream, colorama.AnsiToWin32) - else self.stream.wrapped - ) - - # If the stream is a tty we should color it - if hasattr(real_stream, "isatty") and real_stream.isatty(): - return True - - # If we have an ASNI term we should color it - if os.environ.get("TERM") == "ANSI": - return True - - # If anything else we should not color it - return False - - def format(self, record): - msg = logging.StreamHandler.format(self, record) - - if self.should_color(): - for level, color in self.COLORS: - if record.levelno >= level: - msg = color(msg) - break - - return msg - - -class BetterRotatingFileHandler(logging.handlers.RotatingFileHandler): - - def _open(self): - ensure_dir(os.path.dirname(self.baseFilename)) - return logging.handlers.RotatingFileHandler._open(self) - - -class MaxLevelFilter(logging.Filter): - - def __init__(self, level): - self.level = level - - def filter(self, record): - return record.levelno < self.level diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/outdated.py b/classifier/myenv/lib/python3.6/site-packages/pip/utils/outdated.py deleted file mode 100644 index 2164cc3..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/utils/outdated.py +++ /dev/null @@ -1,162 +0,0 @@ -from __future__ import absolute_import - -import datetime -import json -import logging -import os.path -import sys - -from pip._vendor import lockfile -from pip._vendor.packaging import version as packaging_version - -from pip.compat import total_seconds, WINDOWS -from pip.models import PyPI -from pip.locations import USER_CACHE_DIR, running_under_virtualenv -from pip.utils import ensure_dir, get_installed_version -from pip.utils.filesystem import check_path_owner - - -SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ" - - -logger = logging.getLogger(__name__) - - -class VirtualenvSelfCheckState(object): - def __init__(self): - self.statefile_path = os.path.join(sys.prefix, "pip-selfcheck.json") - - # Load the existing state - try: - with open(self.statefile_path) as statefile: - self.state = json.load(statefile) - except (IOError, ValueError): - self.state = {} - - def save(self, pypi_version, current_time): - # Attempt to write out our version check file - with open(self.statefile_path, "w") as statefile: - json.dump( - { - "last_check": current_time.strftime(SELFCHECK_DATE_FMT), - "pypi_version": pypi_version, - }, - statefile, - sort_keys=True, - separators=(",", ":") - ) - - -class GlobalSelfCheckState(object): - def __init__(self): - self.statefile_path = os.path.join(USER_CACHE_DIR, "selfcheck.json") 
- - # Load the existing state - try: - with open(self.statefile_path) as statefile: - self.state = json.load(statefile)[sys.prefix] - except (IOError, ValueError, KeyError): - self.state = {} - - def save(self, pypi_version, current_time): - # Check to make sure that we own the directory - if not check_path_owner(os.path.dirname(self.statefile_path)): - return - - # Now that we've ensured the directory is owned by this user, we'll go - # ahead and make sure that all our directories are created. - ensure_dir(os.path.dirname(self.statefile_path)) - - # Attempt to write out our version check file - with lockfile.LockFile(self.statefile_path): - if os.path.exists(self.statefile_path): - with open(self.statefile_path) as statefile: - state = json.load(statefile) - else: - state = {} - - state[sys.prefix] = { - "last_check": current_time.strftime(SELFCHECK_DATE_FMT), - "pypi_version": pypi_version, - } - - with open(self.statefile_path, "w") as statefile: - json.dump(state, statefile, sort_keys=True, - separators=(",", ":")) - - -def load_selfcheck_statefile(): - if running_under_virtualenv(): - return VirtualenvSelfCheckState() - else: - return GlobalSelfCheckState() - - -def pip_version_check(session): - """Check for an update for pip. - - Limit the frequency of checks to once per week. State is stored either in - the active virtualenv or in the user's USER_CACHE_DIR keyed off the prefix - of the pip script path. - """ - installed_version = get_installed_version("pip") - if installed_version is None: - return - - pip_version = packaging_version.parse(installed_version) - pypi_version = None - - try: - state = load_selfcheck_statefile() - - current_time = datetime.datetime.utcnow() - # Determine if we need to refresh the state - if "last_check" in state.state and "pypi_version" in state.state: - last_check = datetime.datetime.strptime( - state.state["last_check"], - SELFCHECK_DATE_FMT - ) - if total_seconds(current_time - last_check) < 7 * 24 * 60 * 60: - pypi_version = state.state["pypi_version"] - - # Refresh the version if we need to or just see if we need to warn - if pypi_version is None: - resp = session.get( - PyPI.pip_json_url, - headers={"Accept": "application/json"}, - ) - resp.raise_for_status() - pypi_version = [ - v for v in sorted( - list(resp.json()["releases"]), - key=packaging_version.parse, - ) - if not packaging_version.parse(v).is_prerelease - ][-1] - - # save that we've performed a check - state.save(pypi_version, current_time) - - remote_version = packaging_version.parse(pypi_version) - - # Determine if our pypi_version is older - if (pip_version < remote_version and - pip_version.base_version != remote_version.base_version): - # Advise "python -m pip" on Windows to avoid issues - # with overwriting pip.exe. 
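
The freshness test in `pip_version_check` is a plain timestamp comparison; `total_seconds` from pip.compat is a shim for `timedelta.total_seconds()` on old Pythons. In sketch form:

import datetime

SELFCHECK_DATE_FMT = "%Y-%m-%dT%H:%M:%SZ"
last_check = datetime.datetime.strptime('2017-03-01T00:00:00Z',
                                        SELFCHECK_DATE_FMT)
now = datetime.datetime(2017, 3, 5)

stale = (now - last_check).total_seconds() >= 7 * 24 * 60 * 60
assert not stale  # four days old: reuse the cached pypi_version
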
- if WINDOWS: - pip_cmd = "python -m pip" - else: - pip_cmd = "pip" - logger.warning( - "You are using pip version %s, however version %s is " - "available.\nYou should consider upgrading via the " - "'%s install --upgrade pip' command.", - pip_version, pypi_version, pip_cmd - ) - - except Exception: - logger.debug( - "There was an error checking the latest version of pip", - exc_info=True, - ) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/packaging.py b/classifier/myenv/lib/python3.6/site-packages/pip/utils/packaging.py deleted file mode 100644 index e93b20d..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/utils/packaging.py +++ /dev/null @@ -1,63 +0,0 @@ -from __future__ import absolute_import - -from email.parser import FeedParser - -import logging -import sys - -from pip._vendor.packaging import specifiers -from pip._vendor.packaging import version -from pip._vendor import pkg_resources - -from pip import exceptions - -logger = logging.getLogger(__name__) - - -def check_requires_python(requires_python): - """ - Check if the python version in use match the `requires_python` specifier. - - Returns `True` if the version of python in use matches the requirement. - Returns `False` if the version of python in use does not matches the - requirement. - - Raises an InvalidSpecifier if `requires_python` have an invalid format. - """ - if requires_python is None: - # The package provides no information - return True - requires_python_specifier = specifiers.SpecifierSet(requires_python) - - # We only use major.minor.micro - python_version = version.parse('.'.join(map(str, sys.version_info[:3]))) - return python_version in requires_python_specifier - - -def get_metadata(dist): - if (isinstance(dist, pkg_resources.DistInfoDistribution) and - dist.has_metadata('METADATA')): - return dist.get_metadata('METADATA') - elif dist.has_metadata('PKG-INFO'): - return dist.get_metadata('PKG-INFO') - - -def check_dist_requires_python(dist): - metadata = get_metadata(dist) - feed_parser = FeedParser() - feed_parser.feed(metadata) - pkg_info_dict = feed_parser.close() - requires_python = pkg_info_dict.get('Requires-Python') - try: - if not check_requires_python(requires_python): - raise exceptions.UnsupportedPythonVersion( - "%s requires Python '%s' but the running Python is %s" % ( - dist.project_name, - requires_python, - '.'.join(map(str, sys.version_info[:3])),) - ) - except specifiers.InvalidSpecifier as e: - logger.warning( - "Package %s has an invalid Requires-Python entry %s - %s" % ( - dist.project_name, requires_python, e)) - return diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/setuptools_build.py b/classifier/myenv/lib/python3.6/site-packages/pip/utils/setuptools_build.py deleted file mode 100644 index 03973e9..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/utils/setuptools_build.py +++ /dev/null @@ -1,8 +0,0 @@ -# Shim to wrap setup.py invocation with setuptools -SETUPTOOLS_SHIM = ( - "import setuptools, tokenize;__file__=%r;" - "f=getattr(tokenize, 'open', open)(__file__);" - "code=f.read().replace('\\r\\n', '\\n');" - "f.close();" - "exec(compile(code, __file__, 'exec'))" -) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/utils/ui.py b/classifier/myenv/lib/python3.6/site-packages/pip/utils/ui.py deleted file mode 100644 index bba73e3..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/utils/ui.py +++ /dev/null @@ -1,344 +0,0 @@ -from __future__ import absolute_import -from __future__ import division 
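
The membership test at the heart of `check_requires_python` above can be tried directly, assuming the standalone `packaging` distribution is installed (pip vendors the same code as pip._vendor.packaging):

from packaging import specifiers, version

spec = specifiers.SpecifierSet('>=2.7,!=3.0.*,!=3.1.*')
assert version.parse('3.6.0') in spec        # running Python satisfies the spec
assert version.parse('3.0.1') not in spec    # excluded by the !=3.0.* clause
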
- -import itertools -import sys -from signal import signal, SIGINT, default_int_handler -import time -import contextlib -import logging - -from pip.compat import WINDOWS -from pip.utils import format_size -from pip.utils.logging import get_indentation -from pip._vendor import six -from pip._vendor.progress.bar import Bar, IncrementalBar -from pip._vendor.progress.helpers import (WritelnMixin, - HIDE_CURSOR, SHOW_CURSOR) -from pip._vendor.progress.spinner import Spinner - -try: - from pip._vendor import colorama -# Lots of different errors can come from this, including SystemError and -# ImportError. -except Exception: - colorama = None - -logger = logging.getLogger(__name__) - - -def _select_progress_class(preferred, fallback): - encoding = getattr(preferred.file, "encoding", None) - - # If we don't know what encoding this file is in, then we'll just assume - # that it doesn't support unicode and use the ASCII bar. - if not encoding: - return fallback - - # Collect all of the possible characters we want to use with the preferred - # bar. - characters = [ - getattr(preferred, "empty_fill", six.text_type()), - getattr(preferred, "fill", six.text_type()), - ] - characters += list(getattr(preferred, "phases", [])) - - # Try to decode the characters we're using for the bar using the encoding - # of the given file, if this works then we'll assume that we can use the - # fancier bar and if not we'll fall back to the plaintext bar. - try: - six.text_type().join(characters).encode(encoding) - except UnicodeEncodeError: - return fallback - else: - return preferred - - -_BaseBar = _select_progress_class(IncrementalBar, Bar) - - -class InterruptibleMixin(object): - """ - Helper to ensure that self.finish() gets called on keyboard interrupt. - - This allows downloads to be interrupted without leaving temporary state - (like hidden cursors) behind. - - This class is similar to the progress library's existing SigIntMixin - helper, but as of version 1.2, that helper has the following problems: - - 1. It calls sys.exit(). - 2. It discards the existing SIGINT handler completely. - 3. It leaves its own handler in place even after an uninterrupted finish, - which will have unexpected delayed effects if the user triggers an - unrelated keyboard interrupt some time after a progress-displaying - download has already completed, for example. - """ - - def __init__(self, *args, **kwargs): - """ - Save the original SIGINT handler for later. - """ - super(InterruptibleMixin, self).__init__(*args, **kwargs) - - self.original_handler = signal(SIGINT, self.handle_sigint) - - # If signal() returns None, the previous handler was not installed from - # Python, and we cannot restore it. This probably should not happen, - # but if it does, we must restore something sensible instead, at least. - # The least bad option should be Python's default SIGINT handler, which - # just raises KeyboardInterrupt. - if self.original_handler is None: - self.original_handler = default_int_handler - - def finish(self): - """ - Restore the original SIGINT handler after finishing. - - This should happen regardless of whether the progress display finishes - normally, or gets interrupted. - """ - super(InterruptibleMixin, self).finish() - signal(SIGINT, self.original_handler) - - def handle_sigint(self, signum, frame): - """ - Call self.finish() before delegating to the original SIGINT handler. - - This handler should only be in place while the progress display is - active. 
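
The handler bookkeeping in `InterruptibleMixin` boils down to `signal.signal` returning the previous handler, which is reinstalled when the display finishes. A sketch of the same save-and-restore pattern:

import signal

def handle_sigint(signum, frame):
    # ... tidy up the display, then delegate to the saved handler ...
    original(signum, frame)

original = signal.signal(signal.SIGINT, handle_sigint)
if original is None:
    # Previous handler wasn't installed from Python; fall back sensibly,
    # as the mixin above does.
    original = signal.default_int_handler
try:
    pass  # ... show progress ...
finally:
    signal.signal(signal.SIGINT, original)
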
- """ - self.finish() - self.original_handler(signum, frame) - - -class DownloadProgressMixin(object): - - def __init__(self, *args, **kwargs): - super(DownloadProgressMixin, self).__init__(*args, **kwargs) - self.message = (" " * (get_indentation() + 2)) + self.message - - @property - def downloaded(self): - return format_size(self.index) - - @property - def download_speed(self): - # Avoid zero division errors... - if self.avg == 0.0: - return "..." - return format_size(1 / self.avg) + "/s" - - @property - def pretty_eta(self): - if self.eta: - return "eta %s" % self.eta_td - return "" - - def iter(self, it, n=1): - for x in it: - yield x - self.next(n) - self.finish() - - -class WindowsMixin(object): - - def __init__(self, *args, **kwargs): - # The Windows terminal does not support the hide/show cursor ANSI codes - # even with colorama. So we'll ensure that hide_cursor is False on - # Windows. - # This call neds to go before the super() call, so that hide_cursor - # is set in time. The base progress bar class writes the "hide cursor" - # code to the terminal in its init, so if we don't set this soon - # enough, we get a "hide" with no corresponding "show"... - if WINDOWS and self.hide_cursor: - self.hide_cursor = False - - super(WindowsMixin, self).__init__(*args, **kwargs) - - # Check if we are running on Windows and we have the colorama module, - # if we do then wrap our file with it. - if WINDOWS and colorama: - self.file = colorama.AnsiToWin32(self.file) - # The progress code expects to be able to call self.file.isatty() - # but the colorama.AnsiToWin32() object doesn't have that, so we'll - # add it. - self.file.isatty = lambda: self.file.wrapped.isatty() - # The progress code expects to be able to call self.file.flush() - # but the colorama.AnsiToWin32() object doesn't have that, so we'll - # add it. - self.file.flush = lambda: self.file.wrapped.flush() - - -class DownloadProgressBar(WindowsMixin, InterruptibleMixin, - DownloadProgressMixin, _BaseBar): - - file = sys.stdout - message = "%(percent)d%%" - suffix = "%(downloaded)s %(download_speed)s %(pretty_eta)s" - - -class DownloadProgressSpinner(WindowsMixin, InterruptibleMixin, - DownloadProgressMixin, WritelnMixin, Spinner): - - file = sys.stdout - suffix = "%(downloaded)s %(download_speed)s" - - def next_phase(self): - if not hasattr(self, "_phaser"): - self._phaser = itertools.cycle(self.phases) - return next(self._phaser) - - def update(self): - message = self.message % self - phase = self.next_phase() - suffix = self.suffix % self - line = ''.join([ - message, - " " if message else "", - phase, - " " if suffix else "", - suffix, - ]) - - self.writeln(line) - - -################################################################ -# Generic "something is happening" spinners -# -# We don't even try using progress.spinner.Spinner here because it's actually -# simpler to reimplement from scratch than to coerce their code into doing -# what we need. -################################################################ - -@contextlib.contextmanager -def hidden_cursor(file): - # The Windows terminal does not support the hide/show cursor ANSI codes, - # even via colorama. So don't even try. - if WINDOWS: - yield - # We don't want to clutter the output with control characters if we're - # writing to a file, or if the user is running with --quiet. 
- # See https://github.com/pypa/pip/issues/3418 - elif not file.isatty() or logger.getEffectiveLevel() > logging.INFO: - yield - else: - file.write(HIDE_CURSOR) - try: - yield - finally: - file.write(SHOW_CURSOR) - - -class RateLimiter(object): - def __init__(self, min_update_interval_seconds): - self._min_update_interval_seconds = min_update_interval_seconds - self._last_update = 0 - - def ready(self): - now = time.time() - delta = now - self._last_update - return delta >= self._min_update_interval_seconds - - def reset(self): - self._last_update = time.time() - - -class InteractiveSpinner(object): - def __init__(self, message, file=None, spin_chars="-\\|/", - # Empirically, 8 updates/second looks nice - min_update_interval_seconds=0.125): - self._message = message - if file is None: - file = sys.stdout - self._file = file - self._rate_limiter = RateLimiter(min_update_interval_seconds) - self._finished = False - - self._spin_cycle = itertools.cycle(spin_chars) - - self._file.write(" " * get_indentation() + self._message + " ... ") - self._width = 0 - - def _write(self, status): - assert not self._finished - # Erase what we wrote before by backspacing to the beginning, writing - # spaces to overwrite the old text, and then backspacing again - backup = "\b" * self._width - self._file.write(backup + " " * self._width + backup) - # Now we have a blank slate to add our status - self._file.write(status) - self._width = len(status) - self._file.flush() - self._rate_limiter.reset() - - def spin(self): - if self._finished: - return - if not self._rate_limiter.ready(): - return - self._write(next(self._spin_cycle)) - - def finish(self, final_status): - if self._finished: - return - self._write(final_status) - self._file.write("\n") - self._file.flush() - self._finished = True - - -# Used for dumb terminals, non-interactive installs (no tty), etc. -# We still print updates occasionally (once every 60 seconds by default) to -# act as a keep-alive for systems like Travis-CI that take lack-of-output as -# an indication that a task has frozen. -class NonInteractiveSpinner(object): - def __init__(self, message, min_update_interval_seconds=60): - self._message = message - self._finished = False - self._rate_limiter = RateLimiter(min_update_interval_seconds) - self._update("started") - - def _update(self, status): - assert not self._finished - self._rate_limiter.reset() - logger.info("%s: %s", self._message, status) - - def spin(self): - if self._finished: - return - if not self._rate_limiter.ready(): - return - self._update("still running...") - - def finish(self, final_status): - if self._finished: - return - self._update("finished with status '%s'" % (final_status,)) - self._finished = True - - -@contextlib.contextmanager -def open_spinner(message): - # Interactive spinner goes directly to sys.stdout rather than being routed - # through the logging system, but it acts like it has level INFO, - # i.e. it's only displayed if we're at level INFO or better. - # Non-interactive spinner goes through the logging system, so it is always - # in sync with logging configuration. 
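
`RateLimiter` above is a time-based debounce; restated so it can run on its own:

import time

class RateLimiter(object):
    def __init__(self, min_update_interval_seconds):
        self._min = min_update_interval_seconds
        self._last = 0

    def ready(self):
        return time.time() - self._last >= self._min

    def reset(self):
        self._last = time.time()

rl = RateLimiter(0.125)  # ~8 updates/second, as the interactive spinner uses
assert rl.ready()        # never updated yet, so the first draw goes through
rl.reset()
assert not rl.ready()    # immediately afterwards it is too soon to redraw
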
- if sys.stdout.isatty() and logger.getEffectiveLevel() <= logging.INFO: - spinner = InteractiveSpinner(message) - else: - spinner = NonInteractiveSpinner(message) - try: - with hidden_cursor(sys.stdout): - yield spinner - except KeyboardInterrupt: - spinner.finish("canceled") - raise - except Exception: - spinner.finish("error") - raise - else: - spinner.finish("done") diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__init__.py b/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__init__.py deleted file mode 100644 index 8d3dbb2..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__init__.py +++ /dev/null @@ -1,366 +0,0 @@ -"""Handles all VCS (version control) support""" -from __future__ import absolute_import - -import errno -import logging -import os -import shutil -import sys - -from pip._vendor.six.moves.urllib import parse as urllib_parse - -from pip.exceptions import BadCommand -from pip.utils import (display_path, backup_dir, call_subprocess, - rmtree, ask_path_exists) - - -__all__ = ['vcs', 'get_src_requirement'] - - -logger = logging.getLogger(__name__) - - -class VcsSupport(object): - _registry = {} - schemes = ['ssh', 'git', 'hg', 'bzr', 'sftp', 'svn'] - - def __init__(self): - # Register more schemes with urlparse for various version control - # systems - urllib_parse.uses_netloc.extend(self.schemes) - # Python >= 2.7.4, 3.3 doesn't have uses_fragment - if getattr(urllib_parse, 'uses_fragment', None): - urllib_parse.uses_fragment.extend(self.schemes) - super(VcsSupport, self).__init__() - - def __iter__(self): - return self._registry.__iter__() - - @property - def backends(self): - return list(self._registry.values()) - - @property - def dirnames(self): - return [backend.dirname for backend in self.backends] - - @property - def all_schemes(self): - schemes = [] - for backend in self.backends: - schemes.extend(backend.schemes) - return schemes - - def register(self, cls): - if not hasattr(cls, 'name'): - logger.warning('Cannot register VCS %s', cls.__name__) - return - if cls.name not in self._registry: - self._registry[cls.name] = cls - logger.debug('Registered VCS backend: %s', cls.name) - - def unregister(self, cls=None, name=None): - if name in self._registry: - del self._registry[name] - elif cls in self._registry.values(): - del self._registry[cls.name] - else: - logger.warning('Cannot unregister because no class or name given') - - def get_backend_name(self, location): - """ - Return the name of the version control backend if found at given - location, e.g. 
vcs.get_backend_name('/path/to/vcs/checkout')
-        """
-        for vc_type in self._registry.values():
-            if vc_type.controls_location(location):
-                logger.debug('Determined that %s uses VCS: %s',
-                             location, vc_type.name)
-                return vc_type.name
-        return None
-
-    def get_backend(self, name):
-        name = name.lower()
-        if name in self._registry:
-            return self._registry[name]
-
-    def get_backend_from_location(self, location):
-        vc_type = self.get_backend_name(location)
-        if vc_type:
-            return self.get_backend(vc_type)
-        return None
-
-
-vcs = VcsSupport()
-
-
-class VersionControl(object):
-    name = ''
-    dirname = ''
-    # List of supported schemes for this Version Control
-    schemes = ()
-
-    def __init__(self, url=None, *args, **kwargs):
-        self.url = url
-        super(VersionControl, self).__init__(*args, **kwargs)
-
-    def _is_local_repository(self, repo):
-        """
-        POSIX absolute paths start with os.path.sep,
-        win32 ones start with a drive (like c:\\folder)
-        """
-        drive, tail = os.path.splitdrive(repo)
-        return repo.startswith(os.path.sep) or drive
-
-    # See issue #1083 for why this method was introduced:
-    # https://github.com/pypa/pip/issues/1083
-    def translate_egg_surname(self, surname):
-        # For example, Django has branches of the form "stable/1.7.x".
-        return surname.replace('/', '_')
-
-    def export(self, location):
-        """
-        Export the repository at the url to the destination location,
-        i.e. only download the files, without VCS information
-        """
-        raise NotImplementedError
-
-    def get_url_rev(self):
-        """
-        Returns the correct repository URL and revision by parsing the given
-        repository URL
-        """
-        error_message = (
-            "Sorry, '%s' is a malformed VCS url. "
-            "The format is <vcs>+<protocol>://<url>, "
-            "e.g. svn+http://myrepo/svn/MyApp#egg=MyApp"
-        )
-        assert '+' in self.url, error_message % self.url
-        url = self.url.split('+', 1)[1]
-        scheme, netloc, path, query, frag = urllib_parse.urlsplit(url)
-        rev = None
-        if '@' in path:
-            path, rev = path.rsplit('@', 1)
-        url = urllib_parse.urlunsplit((scheme, netloc, path, query, ''))
-        return url, rev
-
-    def get_info(self, location):
-        """
-        Returns (url, revision), where both are strings
-        """
-        assert not location.rstrip('/').endswith(self.dirname), \
-            'Bad directory: %s' % location
-        return self.get_url(location), self.get_revision(location)
-
-    def normalize_url(self, url):
-        """
-        Normalize a URL for comparison by unquoting it and removing any
-        trailing slash.
-        """
-        return urllib_parse.unquote(url).rstrip('/')
-
-    def compare_urls(self, url1, url2):
-        """
-        Compare two repo URLs for identity, ignoring incidental differences.
-        """
-        return (self.normalize_url(url1) == self.normalize_url(url2))
-
-    def obtain(self, dest):
-        """
-        Called when installing or updating an editable package, takes the
-        source path of the checkout.
-        """
-        raise NotImplementedError
-
-    def switch(self, dest, url, rev_options):
-        """
-        Switch the repo at ``dest`` to point to ``URL``.
-        """
-        raise NotImplementedError
-
-    def update(self, dest, rev_options):
-        """
-        Update an already-existing repo to the given ``rev_options``.
-        """
-        raise NotImplementedError
-
-    def check_version(self, dest, rev_options):
-        """
-        Return True if the version is identical to what exists and
-        doesn't need to be updated.
-        """
-        raise NotImplementedError
-
-    def check_destination(self, dest, url, rev_options, rev_display):
-        """
-        Prepare a location to receive a checkout/clone.
-
-        Return True if the location is ready for (and requires) a
-        checkout/clone, False otherwise.
- """ - checkout = True - prompt = False - if os.path.exists(dest): - checkout = False - if os.path.exists(os.path.join(dest, self.dirname)): - existing_url = self.get_url(dest) - if self.compare_urls(existing_url, url): - logger.debug( - '%s in %s exists, and has correct URL (%s)', - self.repo_name.title(), - display_path(dest), - url, - ) - if not self.check_version(dest, rev_options): - logger.info( - 'Updating %s %s%s', - display_path(dest), - self.repo_name, - rev_display, - ) - self.update(dest, rev_options) - else: - logger.info( - 'Skipping because already up-to-date.') - else: - logger.warning( - '%s %s in %s exists with URL %s', - self.name, - self.repo_name, - display_path(dest), - existing_url, - ) - prompt = ('(s)witch, (i)gnore, (w)ipe, (b)ackup ', - ('s', 'i', 'w', 'b')) - else: - logger.warning( - 'Directory %s already exists, and is not a %s %s.', - dest, - self.name, - self.repo_name, - ) - prompt = ('(i)gnore, (w)ipe, (b)ackup ', ('i', 'w', 'b')) - if prompt: - logger.warning( - 'The plan is to install the %s repository %s', - self.name, - url, - ) - response = ask_path_exists('What to do? %s' % prompt[0], - prompt[1]) - - if response == 's': - logger.info( - 'Switching %s %s to %s%s', - self.repo_name, - display_path(dest), - url, - rev_display, - ) - self.switch(dest, url, rev_options) - elif response == 'i': - # do nothing - pass - elif response == 'w': - logger.warning('Deleting %s', display_path(dest)) - rmtree(dest) - checkout = True - elif response == 'b': - dest_dir = backup_dir(dest) - logger.warning( - 'Backing up %s to %s', display_path(dest), dest_dir, - ) - shutil.move(dest, dest_dir) - checkout = True - elif response == 'a': - sys.exit(-1) - return checkout - - def unpack(self, location): - """ - Clean up current location and download the url repository - (and vcs infos) into location - """ - if os.path.exists(location): - rmtree(location) - self.obtain(location) - - def get_src_requirement(self, dist, location): - """ - Return a string representing the requirement needed to - redownload the files currently present in location, something - like: - {repository_url}@{revision}#egg={project_name}-{version_identifier} - """ - raise NotImplementedError - - def get_url(self, location): - """ - Return the url used at location - Used in get_info or check_destination - """ - raise NotImplementedError - - def get_revision(self, location): - """ - Return the current revision of the files at location - Used in get_info - """ - raise NotImplementedError - - def run_command(self, cmd, show_stdout=True, cwd=None, - on_returncode='raise', - command_desc=None, - extra_environ=None, spinner=None): - """ - Run a VCS subcommand - This is simply a wrapper around call_subprocess that adds the VCS - command name, and checks that the VCS is available - """ - cmd = [self.name] + cmd - try: - return call_subprocess(cmd, show_stdout, cwd, - on_returncode, - command_desc, extra_environ, - spinner) - except OSError as e: - # errno.ENOENT = no such file or directory - # In other words, the VCS executable isn't available - if e.errno == errno.ENOENT: - raise BadCommand('Cannot find command %r' % self.name) - else: - raise # re-raise exception if a different error occurred - - @classmethod - def controls_location(cls, location): - """ - Check if a location is controlled by the vcs. - It is meant to be overridden to implement smarter detection - mechanisms for specific vcs. 
- """ - logger.debug('Checking in %s for %s (%s)...', - location, cls.dirname, cls.name) - path = os.path.join(location, cls.dirname) - return os.path.exists(path) - - -def get_src_requirement(dist, location): - version_control = vcs.get_backend_from_location(location) - if version_control: - try: - return version_control().get_src_requirement(dist, - location) - except BadCommand: - logger.warning( - 'cannot determine version of editable source in %s ' - '(%s command not found in path)', - location, - version_control.name, - ) - return dist.as_requirement() - logger.warning( - 'cannot determine version of editable source in %s (is not SVN ' - 'checkout, Git clone, Mercurial clone or Bazaar branch)', - location, - ) - return dist.as_requirement() diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index 104632c..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__pycache__/bazaar.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__pycache__/bazaar.cpython-36.pyc deleted file mode 100644 index 444fc99..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__pycache__/bazaar.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__pycache__/git.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__pycache__/git.cpython-36.pyc deleted file mode 100644 index 51d6efc..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__pycache__/git.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__pycache__/mercurial.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__pycache__/mercurial.cpython-36.pyc deleted file mode 100644 index 9556155..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__pycache__/mercurial.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__pycache__/subversion.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__pycache__/subversion.cpython-36.pyc deleted file mode 100644 index 8e820b3..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/__pycache__/subversion.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/bazaar.py b/classifier/myenv/lib/python3.6/site-packages/pip/vcs/bazaar.py deleted file mode 100644 index 0f09584..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/bazaar.py +++ /dev/null @@ -1,116 +0,0 @@ -from __future__ import absolute_import - -import logging -import os -import tempfile - -# TODO: Get this into six.moves.urllib.parse -try: - from urllib import parse as urllib_parse -except ImportError: - import urlparse as urllib_parse - -from pip.utils import rmtree, display_path -from pip.vcs import vcs, VersionControl -from pip.download import path_to_url - - -logger = logging.getLogger(__name__) - - -class Bazaar(VersionControl): - name = 'bzr' - dirname = '.bzr' - repo_name = 'branch' - schemes = ( - 'bzr', 'bzr+http', 'bzr+https', 'bzr+ssh', 'bzr+sftp', 'bzr+ftp', - 'bzr+lp', - ) - - def __init__(self, url=None, *args, **kwargs): - super(Bazaar, self).__init__(url, *args, 
**kwargs) - # Python >= 2.7.4, 3.3 doesn't have uses_fragment or non_hierarchical - # Register lp but do not expose as a scheme to support bzr+lp. - if getattr(urllib_parse, 'uses_fragment', None): - urllib_parse.uses_fragment.extend(['lp']) - urllib_parse.non_hierarchical.extend(['lp']) - - def export(self, location): - """ - Export the Bazaar repository at the url to the destination location - """ - temp_dir = tempfile.mkdtemp('-export', 'pip-') - self.unpack(temp_dir) - if os.path.exists(location): - # Remove the location to make sure Bazaar can export it correctly - rmtree(location) - try: - self.run_command(['export', location], cwd=temp_dir, - show_stdout=False) - finally: - rmtree(temp_dir) - - def switch(self, dest, url, rev_options): - self.run_command(['switch', url], cwd=dest) - - def update(self, dest, rev_options): - self.run_command(['pull', '-q'] + rev_options, cwd=dest) - - def obtain(self, dest): - url, rev = self.get_url_rev() - if rev: - rev_options = ['-r', rev] - rev_display = ' (to revision %s)' % rev - else: - rev_options = [] - rev_display = '' - if self.check_destination(dest, url, rev_options, rev_display): - logger.info( - 'Checking out %s%s to %s', - url, - rev_display, - display_path(dest), - ) - self.run_command(['branch', '-q'] + rev_options + [url, dest]) - - def get_url_rev(self): - # hotfix the URL scheme after removing bzr+ from bzr+ssh:// readd it - url, rev = super(Bazaar, self).get_url_rev() - if url.startswith('ssh://'): - url = 'bzr+' + url - return url, rev - - def get_url(self, location): - urls = self.run_command(['info'], show_stdout=False, cwd=location) - for line in urls.splitlines(): - line = line.strip() - for x in ('checkout of branch: ', - 'parent branch: '): - if line.startswith(x): - repo = line.split(x)[1] - if self._is_local_repository(repo): - return path_to_url(repo) - return repo - return None - - def get_revision(self, location): - revision = self.run_command( - ['revno'], show_stdout=False, cwd=location) - return revision.splitlines()[-1] - - def get_src_requirement(self, dist, location): - repo = self.get_url(location) - if not repo: - return None - if not repo.lower().startswith('bzr:'): - repo = 'bzr+' + repo - egg_project_name = dist.egg_name().split('-', 1)[0] - current_rev = self.get_revision(location) - return '%s@%s#egg=%s' % (repo, current_rev, egg_project_name) - - def check_version(self, dest, rev_options): - """Always assume the versions don't match""" - return False - - -vcs.register(Bazaar) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/git.py b/classifier/myenv/lib/python3.6/site-packages/pip/vcs/git.py deleted file mode 100644 index 2187dd8..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/git.py +++ /dev/null @@ -1,300 +0,0 @@ -from __future__ import absolute_import - -import logging -import tempfile -import os.path - -from pip.compat import samefile -from pip.exceptions import BadCommand -from pip._vendor.six.moves.urllib import parse as urllib_parse -from pip._vendor.six.moves.urllib import request as urllib_request -from pip._vendor.packaging.version import parse as parse_version - -from pip.utils import display_path, rmtree -from pip.vcs import vcs, VersionControl - - -urlsplit = urllib_parse.urlsplit -urlunsplit = urllib_parse.urlunsplit - - -logger = logging.getLogger(__name__) - - -class Git(VersionControl): - name = 'git' - dirname = '.git' - repo_name = 'clone' - schemes = ( - 'git', 'git+http', 'git+https', 'git+ssh', 'git+git', 'git+file', - ) - - def __init__(self, 
url=None, *args, **kwargs):
-
-        # Works around an apparent Git bug
-        # (see http://article.gmane.org/gmane.comp.version-control.git/146500)
-        if url:
-            scheme, netloc, path, query, fragment = urlsplit(url)
-            if scheme.endswith('file'):
-                initial_slashes = path[:-len(path.lstrip('/'))]
-                newpath = (
-                    initial_slashes +
-                    urllib_request.url2pathname(path)
-                    .replace('\\', '/').lstrip('/')
-                )
-                url = urlunsplit((scheme, netloc, newpath, query, fragment))
-                after_plus = scheme.find('+') + 1
-                url = scheme[:after_plus] + urlunsplit(
-                    (scheme[after_plus:], netloc, newpath, query, fragment),
-                )
-
-        super(Git, self).__init__(url, *args, **kwargs)
-
-    def get_git_version(self):
-        VERSION_PFX = 'git version '
-        version = self.run_command(['version'], show_stdout=False)
-        if version.startswith(VERSION_PFX):
-            version = version[len(VERSION_PFX):]
-        else:
-            version = ''
-        # get first 3 positions of the git version because
-        # on windows it is x.y.z.windows.t, and this parses as
-        # LegacyVersion, which is always smaller than a Version.
-        version = '.'.join(version.split('.')[:3])
-        return parse_version(version)
-
-    def export(self, location):
-        """Export the Git repository at the url to the destination location"""
-        temp_dir = tempfile.mkdtemp('-export', 'pip-')
-        self.unpack(temp_dir)
-        try:
-            if not location.endswith('/'):
-                location = location + '/'
-            self.run_command(
-                ['checkout-index', '-a', '-f', '--prefix', location],
-                show_stdout=False, cwd=temp_dir)
-        finally:
-            rmtree(temp_dir)
-
-    def check_rev_options(self, rev, dest, rev_options):
-        """Check the revision options before checkout to compensate for the
-        fact that tags and branches may need origin/ as a prefix.
-        Returns the SHA1 of the branch or tag if found.
-        """
-        revisions = self.get_short_refs(dest)
-
-        origin_rev = 'origin/%s' % rev
-        if origin_rev in revisions:
-            # remote branch
-            return [revisions[origin_rev]]
-        elif rev in revisions:
-            # a local tag or branch name
-            return [revisions[rev]]
-        else:
-            logger.warning(
-                "Could not find a tag or branch '%s', assuming commit.", rev,
-            )
-            return rev_options
-
-    def check_version(self, dest, rev_options):
-        """
-        Compare the current sha to the ref. ref may be a branch or tag name,
-        but current rev will always point to a sha. This means that a branch
-        or tag will never compare as True. So this ultimately only matches
-        against exact shas.
- """ - return self.get_revision(dest).startswith(rev_options[0]) - - def switch(self, dest, url, rev_options): - self.run_command(['config', 'remote.origin.url', url], cwd=dest) - self.run_command(['checkout', '-q'] + rev_options, cwd=dest) - - self.update_submodules(dest) - - def update(self, dest, rev_options): - # First fetch changes from the default remote - if self.get_git_version() >= parse_version('1.9.0'): - # fetch tags in addition to everything else - self.run_command(['fetch', '-q', '--tags'], cwd=dest) - else: - self.run_command(['fetch', '-q'], cwd=dest) - # Then reset to wanted revision (maybe even origin/master) - if rev_options: - rev_options = self.check_rev_options( - rev_options[0], dest, rev_options, - ) - self.run_command(['reset', '--hard', '-q'] + rev_options, cwd=dest) - #: update submodules - self.update_submodules(dest) - - def obtain(self, dest): - url, rev = self.get_url_rev() - if rev: - rev_options = [rev] - rev_display = ' (to %s)' % rev - else: - rev_options = ['origin/master'] - rev_display = '' - if self.check_destination(dest, url, rev_options, rev_display): - logger.info( - 'Cloning %s%s to %s', url, rev_display, display_path(dest), - ) - self.run_command(['clone', '-q', url, dest]) - - if rev: - rev_options = self.check_rev_options(rev, dest, rev_options) - # Only do a checkout if rev_options differs from HEAD - if not self.check_version(dest, rev_options): - self.run_command( - ['checkout', '-q'] + rev_options, - cwd=dest, - ) - #: repo may contain submodules - self.update_submodules(dest) - - def get_url(self, location): - """Return URL of the first remote encountered.""" - remotes = self.run_command( - ['config', '--get-regexp', 'remote\..*\.url'], - show_stdout=False, cwd=location) - remotes = remotes.splitlines() - found_remote = remotes[0] - for remote in remotes: - if remote.startswith('remote.origin.url '): - found_remote = remote - break - url = found_remote.split(' ')[1] - return url.strip() - - def get_revision(self, location): - current_rev = self.run_command( - ['rev-parse', 'HEAD'], show_stdout=False, cwd=location) - return current_rev.strip() - - def get_full_refs(self, location): - """Yields tuples of (commit, ref) for branches and tags""" - output = self.run_command(['show-ref'], - show_stdout=False, cwd=location) - for line in output.strip().splitlines(): - commit, ref = line.split(' ', 1) - yield commit.strip(), ref.strip() - - def is_ref_remote(self, ref): - return ref.startswith('refs/remotes/') - - def is_ref_branch(self, ref): - return ref.startswith('refs/heads/') - - def is_ref_tag(self, ref): - return ref.startswith('refs/tags/') - - def is_ref_commit(self, ref): - """A ref is a commit sha if it is not anything else""" - return not any(( - self.is_ref_remote(ref), - self.is_ref_branch(ref), - self.is_ref_tag(ref), - )) - - # Should deprecate `get_refs` since it's ambiguous - def get_refs(self, location): - return self.get_short_refs(location) - - def get_short_refs(self, location): - """Return map of named refs (branches or tags) to commit hashes.""" - rv = {} - for commit, ref in self.get_full_refs(location): - ref_name = None - if self.is_ref_remote(ref): - ref_name = ref[len('refs/remotes/'):] - elif self.is_ref_branch(ref): - ref_name = ref[len('refs/heads/'):] - elif self.is_ref_tag(ref): - ref_name = ref[len('refs/tags/'):] - if ref_name is not None: - rv[ref_name] = commit - return rv - - def _get_subdirectory(self, location): - """Return the relative path of setup.py to the git repo root.""" - # find the repo root - 
git_dir = self.run_command(['rev-parse', '--git-dir'],
-                                   show_stdout=False, cwd=location).strip()
-        if not os.path.isabs(git_dir):
-            git_dir = os.path.join(location, git_dir)
-        root_dir = os.path.join(git_dir, '..')
-        # find setup.py
-        orig_location = location
-        while not os.path.exists(os.path.join(location, 'setup.py')):
-            last_location = location
-            location = os.path.dirname(location)
-            if location == last_location:
-                # We've traversed up to the root of the filesystem without
-                # finding setup.py
-                logger.warning(
-                    "Could not find setup.py for directory %s (tried all "
-                    "parent directories)",
-                    orig_location,
-                )
-                return None
-        # relative path of setup.py to repo root
-        if samefile(root_dir, location):
-            return None
-        return os.path.relpath(location, root_dir)
-
-    def get_src_requirement(self, dist, location):
-        repo = self.get_url(location)
-        if not repo.lower().startswith('git:'):
-            repo = 'git+' + repo
-        egg_project_name = dist.egg_name().split('-', 1)[0]
-        if not repo:
-            return None
-        current_rev = self.get_revision(location)
-        req = '%s@%s#egg=%s' % (repo, current_rev, egg_project_name)
-        subdirectory = self._get_subdirectory(location)
-        if subdirectory:
-            req += '&subdirectory=' + subdirectory
-        return req
-
-    def get_url_rev(self):
-        """
-        Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
-        That's required because although they use SSH they sometimes don't
-        work with an ssh:// scheme (e.g. GitHub). But we need a scheme for
-        parsing. Hence we remove it again afterwards and return it as a stub.
-        """
-        if '://' not in self.url:
-            assert 'file:' not in self.url
-            self.url = self.url.replace('git+', 'git+ssh://')
-            url, rev = super(Git, self).get_url_rev()
-            url = url.replace('ssh://', '')
-        else:
-            url, rev = super(Git, self).get_url_rev()
-
-        return url, rev
-
-    def update_submodules(self, location):
-        if not os.path.exists(os.path.join(location, '.gitmodules')):
-            return
-        self.run_command(
-            ['submodule', 'update', '--init', '--recursive', '-q'],
-            cwd=location,
-        )
-
-    @classmethod
-    def controls_location(cls, location):
-        if super(Git, cls).controls_location(location):
-            return True
-        try:
-            r = cls().run_command(['rev-parse'],
-                                  cwd=location,
-                                  show_stdout=False,
-                                  on_returncode='ignore')
-            return not r
-        except BadCommand:
-            logger.debug("could not determine if %s is under git control "
-                         "because git is not available", location)
-            return False
-
-
-vcs.register(Git)
diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/mercurial.py b/classifier/myenv/lib/python3.6/site-packages/pip/vcs/mercurial.py
deleted file mode 100644
index 1aa83b9..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/mercurial.py
+++ /dev/null
@@ -1,103 +0,0 @@
-from __future__ import absolute_import
-
-import logging
-import os
-import tempfile
-
-from pip.utils import display_path, rmtree
-from pip.vcs import vcs, VersionControl
-from pip.download import path_to_url
-from pip._vendor.six.moves import configparser
-
-
-logger = logging.getLogger(__name__)
-
-
-class Mercurial(VersionControl):
-    name = 'hg'
-    dirname = '.hg'
-    repo_name = 'clone'
-    schemes = ('hg', 'hg+http', 'hg+https', 'hg+ssh', 'hg+static-http')
-
-    def export(self, location):
-        """Export the Hg repository at the url to the destination location"""
-        temp_dir = tempfile.mkdtemp('-export', 'pip-')
-        self.unpack(temp_dir)
-        try:
-            self.run_command(
-                ['archive', location], show_stdout=False, cwd=temp_dir)
-        finally:
-            rmtree(temp_dir)
-
-    def switch(self, dest, url, rev_options):
-
repo_config = os.path.join(dest, self.dirname, 'hgrc') - config = configparser.SafeConfigParser() - try: - config.read(repo_config) - config.set('paths', 'default', url) - with open(repo_config, 'w') as config_file: - config.write(config_file) - except (OSError, configparser.NoSectionError) as exc: - logger.warning( - 'Could not switch Mercurial repository to %s: %s', url, exc, - ) - else: - self.run_command(['update', '-q'] + rev_options, cwd=dest) - - def update(self, dest, rev_options): - self.run_command(['pull', '-q'], cwd=dest) - self.run_command(['update', '-q'] + rev_options, cwd=dest) - - def obtain(self, dest): - url, rev = self.get_url_rev() - if rev: - rev_options = [rev] - rev_display = ' (to revision %s)' % rev - else: - rev_options = [] - rev_display = '' - if self.check_destination(dest, url, rev_options, rev_display): - logger.info( - 'Cloning hg %s%s to %s', - url, - rev_display, - display_path(dest), - ) - self.run_command(['clone', '--noupdate', '-q', url, dest]) - self.run_command(['update', '-q'] + rev_options, cwd=dest) - - def get_url(self, location): - url = self.run_command( - ['showconfig', 'paths.default'], - show_stdout=False, cwd=location).strip() - if self._is_local_repository(url): - url = path_to_url(url) - return url.strip() - - def get_revision(self, location): - current_revision = self.run_command( - ['parents', '--template={rev}'], - show_stdout=False, cwd=location).strip() - return current_revision - - def get_revision_hash(self, location): - current_rev_hash = self.run_command( - ['parents', '--template={node}'], - show_stdout=False, cwd=location).strip() - return current_rev_hash - - def get_src_requirement(self, dist, location): - repo = self.get_url(location) - if not repo.lower().startswith('hg:'): - repo = 'hg+' + repo - egg_project_name = dist.egg_name().split('-', 1)[0] - if not repo: - return None - current_rev_hash = self.get_revision_hash(location) - return '%s@%s#egg=%s' % (repo, current_rev_hash, egg_project_name) - - def check_version(self, dest, rev_options): - """Always assume the versions don't match""" - return False - -vcs.register(Mercurial) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/subversion.py b/classifier/myenv/lib/python3.6/site-packages/pip/vcs/subversion.py deleted file mode 100644 index 4b23156..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/vcs/subversion.py +++ /dev/null @@ -1,269 +0,0 @@ -from __future__ import absolute_import - -import logging -import os -import re - -from pip._vendor.six.moves.urllib import parse as urllib_parse - -from pip.index import Link -from pip.utils import rmtree, display_path -from pip.utils.logging import indent_log -from pip.vcs import vcs, VersionControl - -_svn_xml_url_re = re.compile('url="([^"]+)"') -_svn_rev_re = re.compile('committed-rev="(\d+)"') -_svn_url_re = re.compile(r'URL: (.+)') -_svn_revision_re = re.compile(r'Revision: (.+)') -_svn_info_xml_rev_re = re.compile(r'\s*revision="(\d+)"') -_svn_info_xml_url_re = re.compile(r'<url>(.*)</url>') - - -logger = logging.getLogger(__name__) - - -class Subversion(VersionControl): - name = 'svn' - dirname = '.svn' - repo_name = 'checkout' - schemes = ('svn', 'svn+ssh', 'svn+http', 'svn+https', 'svn+svn') - - def get_info(self, location): - """Returns (url, revision), where both are strings""" - assert not location.rstrip('/').endswith(self.dirname), \ - 'Bad directory: %s' % location - output = self.run_command( - ['info', location], - show_stdout=False, - extra_environ={'LANG': 'C'}, - ) - match = 
_svn_url_re.search(output) - if not match: - logger.warning( - 'Cannot determine URL of svn checkout %s', - display_path(location), - ) - logger.debug('Output that cannot be parsed: \n%s', output) - return None, None - url = match.group(1).strip() - match = _svn_revision_re.search(output) - if not match: - logger.warning( - 'Cannot determine revision of svn checkout %s', - display_path(location), - ) - logger.debug('Output that cannot be parsed: \n%s', output) - return url, None - return url, match.group(1) - - def export(self, location): - """Export the svn repository at the url to the destination location""" - url, rev = self.get_url_rev() - rev_options = get_rev_options(url, rev) - url = self.remove_auth_from_url(url) - logger.info('Exporting svn repository %s to %s', url, location) - with indent_log(): - if os.path.exists(location): - # Subversion doesn't like to check out over an existing - # directory --force fixes this, but was only added in svn 1.5 - rmtree(location) - self.run_command( - ['export'] + rev_options + [url, location], - show_stdout=False) - - def switch(self, dest, url, rev_options): - self.run_command(['switch'] + rev_options + [url, dest]) - - def update(self, dest, rev_options): - self.run_command(['update'] + rev_options + [dest]) - - def obtain(self, dest): - url, rev = self.get_url_rev() - rev_options = get_rev_options(url, rev) - url = self.remove_auth_from_url(url) - if rev: - rev_display = ' (to revision %s)' % rev - else: - rev_display = '' - if self.check_destination(dest, url, rev_options, rev_display): - logger.info( - 'Checking out %s%s to %s', - url, - rev_display, - display_path(dest), - ) - self.run_command(['checkout', '-q'] + rev_options + [url, dest]) - - def get_location(self, dist, dependency_links): - for url in dependency_links: - egg_fragment = Link(url).egg_fragment - if not egg_fragment: - continue - if '-' in egg_fragment: - # FIXME: will this work when a package has - in the name? - key = '-'.join(egg_fragment.split('-')[:-1]).lower() - else: - key = egg_fragment - if key == dist.key: - return url.split('#', 1)[0] - return None - - def get_revision(self, location): - """ - Return the maximum revision for all files under a given location - """ - # Note: taken from setuptools.command.egg_info - revision = 0 - - for base, dirs, files in os.walk(location): - if self.dirname not in dirs: - dirs[:] = [] - continue # no sense walking uncontrolled subdirs - dirs.remove(self.dirname) - entries_fn = os.path.join(base, self.dirname, 'entries') - if not os.path.exists(entries_fn): - # FIXME: should we warn? 
- continue - - dirurl, localrev = self._get_svn_url_rev(base) - - if base == location: - base_url = dirurl + '/' # save the root url - elif not dirurl or not dirurl.startswith(base_url): - dirs[:] = [] - continue # not part of the same svn tree, skip it - revision = max(revision, localrev) - return revision - - def get_url_rev(self): - # hotfix the URL scheme after removing svn+ from svn+ssh:// readd it - url, rev = super(Subversion, self).get_url_rev() - if url.startswith('ssh://'): - url = 'svn+' + url - return url, rev - - def get_url(self, location): - # In cases where the source is in a subdirectory, not alongside - # setup.py we have to look up in the location until we find a real - # setup.py - orig_location = location - while not os.path.exists(os.path.join(location, 'setup.py')): - last_location = location - location = os.path.dirname(location) - if location == last_location: - # We've traversed up to the root of the filesystem without - # finding setup.py - logger.warning( - "Could not find setup.py for directory %s (tried all " - "parent directories)", - orig_location, - ) - return None - - return self._get_svn_url_rev(location)[0] - - def _get_svn_url_rev(self, location): - from pip.exceptions import InstallationError - - entries_path = os.path.join(location, self.dirname, 'entries') - if os.path.exists(entries_path): - with open(entries_path) as f: - data = f.read() - else: # subversion >= 1.7 does not have the 'entries' file - data = '' - - if (data.startswith('8') or - data.startswith('9') or - data.startswith('10')): - data = list(map(str.splitlines, data.split('\n\x0c\n'))) - del data[0][0] # get rid of the '8' - url = data[0][3] - revs = [int(d[9]) for d in data if len(d) > 9 and d[9]] + [0] - elif data.startswith('<?xml'): - match = _svn_xml_url_re.search(data) - if not match: - raise ValueError('Badly formatted data: %r' % data) - url = match.group(1) # get repository URL - revs = [int(m.group(1)) for m in _svn_rev_re.finditer(data)] + [0] - else: - try: - # subversion >= 1.7 - xml = self.run_command( - ['info', '--xml', location], - show_stdout=False, - ) - url = _svn_info_xml_url_re.search(xml).group(1) - revs = [ - int(m.group(1)) for m in _svn_info_xml_rev_re.finditer(xml) - ] - except InstallationError: - url, revs = None, [] - - if revs: - rev = max(revs) - else: - rev = 0 - - return url, rev - - def get_src_requirement(self, dist, location): - repo = self.get_url(location) - if repo is None: - return None - # FIXME: why not project name? - egg_project_name = dist.egg_name().split('-', 1)[0] - rev = self.get_revision(location) - return 'svn+%s@%s#egg=%s' % (repo, rev, egg_project_name) - - def check_version(self, dest, rev_options): - """Always assume the versions don't match""" - return False - - @staticmethod - def remove_auth_from_url(url): - # Return a copy of url with 'username:password@' removed. - # username/pass params are passed to subversion through flags - # and are not recognized in the url. 
- - # parsed url - purl = urllib_parse.urlsplit(url) - stripped_netloc = \ - purl.netloc.split('@')[-1] - - # stripped url - url_pieces = ( - purl.scheme, stripped_netloc, purl.path, purl.query, purl.fragment - ) - surl = urllib_parse.urlunsplit(url_pieces) - return surl - - -def get_rev_options(url, rev): - if rev: - rev_options = ['-r', rev] - else: - rev_options = [] - - r = urllib_parse.urlsplit(url) - if hasattr(r, 'username'): - # >= Python-2.5 - username, password = r.username, r.password - else: - netloc = r[1] - if '@' in netloc: - auth = netloc.split('@')[0] - if ':' in auth: - username, password = auth.split(':', 1) - else: - username, password = auth, None - else: - username, password = None, None - - if username: - rev_options += ['--username', username] - if password: - rev_options += ['--password', password] - return rev_options - - -vcs.register(Subversion) diff --git a/classifier/myenv/lib/python3.6/site-packages/pip/wheel.py b/classifier/myenv/lib/python3.6/site-packages/pip/wheel.py deleted file mode 100644 index 9ac9dff..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pip/wheel.py +++ /dev/null @@ -1,853 +0,0 @@ -""" -Support for installing and building the "wheel" binary package format. -""" -from __future__ import absolute_import - -import compileall -import csv -import errno -import functools -import hashlib -import logging -import os -import os.path -import re -import shutil -import stat -import sys -import tempfile -import warnings - -from base64 import urlsafe_b64encode -from email.parser import Parser - -from pip._vendor.six import StringIO - -import pip -from pip.compat import expanduser -from pip.download import path_to_url, unpack_url -from pip.exceptions import ( - InstallationError, InvalidWheelFilename, UnsupportedWheel) -from pip.locations import distutils_scheme, PIP_DELETE_MARKER_FILENAME -from pip import pep425tags -from pip.utils import ( - call_subprocess, ensure_dir, captured_stdout, rmtree, read_chunks, -) -from pip.utils.ui import open_spinner -from pip.utils.logging import indent_log -from pip.utils.setuptools_build import SETUPTOOLS_SHIM -from pip._vendor.distlib.scripts import ScriptMaker -from pip._vendor import pkg_resources -from pip._vendor.packaging.utils import canonicalize_name -from pip._vendor.six.moves import configparser - - -wheel_ext = '.whl' - -VERSION_COMPATIBLE = (1, 0) - - -logger = logging.getLogger(__name__) - - -class WheelCache(object): - """A cache of wheels for future installs.""" - - def __init__(self, cache_dir, format_control): - """Create a wheel cache. - - :param cache_dir: The root of the cache. - :param format_control: A pip.index.FormatControl object to limit - binaries being read from the cache. - """ - self._cache_dir = expanduser(cache_dir) if cache_dir else None - self._format_control = format_control - - def cached_wheel(self, link, package_name): - return cached_wheel( - self._cache_dir, link, self._format_control, package_name) - - -def _cache_for_link(cache_dir, link): - """ - Return a directory to store cached wheels in for link. - - Because there are M wheels for any one sdist, we provide a directory - to cache them in, and then consult that directory when looking up - cache hits. - - We only insert things into the cache if they have plausible version - numbers, so that we don't contaminate the cache with things that were not - unique. E.g. 
./package might have dozens of installs done for it and build - a version of 0.0...and if we built and cached a wheel, we'd end up using - the same wheel even if the source has been edited. - - :param cache_dir: The cache_dir being used by pip. - :param link: The link of the sdist for which this will cache wheels. - """ - - # We want to generate an url to use as our cache key, we don't want to just - # re-use the URL because it might have other items in the fragment and we - # don't care about those. - key_parts = [link.url_without_fragment] - if link.hash_name is not None and link.hash is not None: - key_parts.append("=".join([link.hash_name, link.hash])) - key_url = "#".join(key_parts) - - # Encode our key url with sha224, we'll use this because it has similar - # security properties to sha256, but with a shorter total output (and thus - # less secure). However the differences don't make a lot of difference for - # our use case here. - hashed = hashlib.sha224(key_url.encode()).hexdigest() - - # We want to nest the directories some to prevent having a ton of top level - # directories where we might run out of sub directories on some FS. - parts = [hashed[:2], hashed[2:4], hashed[4:6], hashed[6:]] - - # Inside of the base location for cached wheels, expand our parts and join - # them all together. - return os.path.join(cache_dir, "wheels", *parts) - - -def cached_wheel(cache_dir, link, format_control, package_name): - if not cache_dir: - return link - if not link: - return link - if link.is_wheel: - return link - if not link.is_artifact: - return link - if not package_name: - return link - canonical_name = canonicalize_name(package_name) - formats = pip.index.fmt_ctl_formats(format_control, canonical_name) - if "binary" not in formats: - return link - root = _cache_for_link(cache_dir, link) - try: - wheel_names = os.listdir(root) - except OSError as e: - if e.errno in (errno.ENOENT, errno.ENOTDIR): - return link - raise - candidates = [] - for wheel_name in wheel_names: - try: - wheel = Wheel(wheel_name) - except InvalidWheelFilename: - continue - if not wheel.supported(): - # Built for a different python/arch/etc - continue - candidates.append((wheel.support_index_min(), wheel_name)) - if not candidates: - return link - candidates.sort() - path = os.path.join(root, candidates[0][1]) - return pip.index.Link(path_to_url(path)) - - -def rehash(path, algo='sha256', blocksize=1 << 20): - """Return (hash, length) for path using hashlib.new(algo)""" - h = hashlib.new(algo) - length = 0 - with open(path, 'rb') as f: - for block in read_chunks(f, size=blocksize): - length += len(block) - h.update(block) - digest = 'sha256=' + urlsafe_b64encode( - h.digest() - ).decode('latin1').rstrip('=') - return (digest, length) - - -def open_for_csv(name, mode): - if sys.version_info[0] < 3: - nl = {} - bin = 'b' - else: - nl = {'newline': ''} - bin = '' - return open(name, mode + bin, **nl) - - -def fix_script(path): - """Replace #!python with #!/path/to/python - Return True if file was changed.""" - # XXX RECORD hashes will need to be updated - if os.path.isfile(path): - with open(path, 'rb') as script: - firstline = script.readline() - if not firstline.startswith(b'#!python'): - return False - exename = sys.executable.encode(sys.getfilesystemencoding()) - firstline = b'#!' + exename + os.linesep.encode("ascii") - rest = script.read() - with open(path, 'wb') as script: - script.write(firstline) - script.write(rest) - return True - -dist_info_re = re.compile(r"""^(?P<namever>(?P<name>.+?)(-(?P<ver>\d.+?))?) 
- \.dist-info$""", re.VERBOSE) - - -def root_is_purelib(name, wheeldir): - """ - Return True if the extracted wheel in wheeldir should go into purelib. - """ - name_folded = name.replace("-", "_") - for item in os.listdir(wheeldir): - match = dist_info_re.match(item) - if match and match.group('name') == name_folded: - with open(os.path.join(wheeldir, item, 'WHEEL')) as wheel: - for line in wheel: - line = line.lower().rstrip() - if line == "root-is-purelib: true": - return True - return False - - -def get_entrypoints(filename): - if not os.path.exists(filename): - return {}, {} - - # This is done because you can pass a string to entry_points wrappers which - # means that they may or may not be valid INI files. The attempt here is to - # strip leading and trailing whitespace in order to make them valid INI - # files. - with open(filename) as fp: - data = StringIO() - for line in fp: - data.write(line.strip()) - data.write("\n") - data.seek(0) - - cp = configparser.RawConfigParser() - cp.optionxform = lambda option: option - cp.readfp(data) - - console = {} - gui = {} - if cp.has_section('console_scripts'): - console = dict(cp.items('console_scripts')) - if cp.has_section('gui_scripts'): - gui = dict(cp.items('gui_scripts')) - return console, gui - - -def move_wheel_files(name, req, wheeldir, user=False, home=None, root=None, - pycompile=True, scheme=None, isolated=False, prefix=None): - """Install a wheel""" - - if not scheme: - scheme = distutils_scheme( - name, user=user, home=home, root=root, isolated=isolated, - prefix=prefix, - ) - - if root_is_purelib(name, wheeldir): - lib_dir = scheme['purelib'] - else: - lib_dir = scheme['platlib'] - - info_dir = [] - data_dirs = [] - source = wheeldir.rstrip(os.path.sep) + os.path.sep - - # Record details of the files moved - # installed = files copied from the wheel to the destination - # changed = files changed while installing (scripts #! 
line typically) - # generated = files newly generated during the install (script wrappers) - installed = {} - changed = set() - generated = [] - - # Compile all of the pyc files that we're going to be installing - if pycompile: - with captured_stdout() as stdout: - with warnings.catch_warnings(): - warnings.filterwarnings('ignore') - compileall.compile_dir(source, force=True, quiet=True) - logger.debug(stdout.getvalue()) - - def normpath(src, p): - return os.path.relpath(src, p).replace(os.path.sep, '/') - - def record_installed(srcfile, destfile, modified=False): - """Map archive RECORD paths to installation RECORD paths.""" - oldpath = normpath(srcfile, wheeldir) - newpath = normpath(destfile, lib_dir) - installed[oldpath] = newpath - if modified: - changed.add(destfile) - - def clobber(source, dest, is_base, fixer=None, filter=None): - ensure_dir(dest) # common for the 'include' path - - for dir, subdirs, files in os.walk(source): - basedir = dir[len(source):].lstrip(os.path.sep) - destdir = os.path.join(dest, basedir) - if is_base and basedir.split(os.path.sep, 1)[0].endswith('.data'): - continue - for s in subdirs: - destsubdir = os.path.join(dest, basedir, s) - if is_base and basedir == '' and destsubdir.endswith('.data'): - data_dirs.append(s) - continue - elif (is_base and - s.endswith('.dist-info') and - canonicalize_name(s).startswith( - canonicalize_name(req.name))): - assert not info_dir, ('Multiple .dist-info directories: ' + - destsubdir + ', ' + - ', '.join(info_dir)) - info_dir.append(destsubdir) - for f in files: - # Skip unwanted files - if filter and filter(f): - continue - srcfile = os.path.join(dir, f) - destfile = os.path.join(dest, basedir, f) - # directory creation is lazy and after the file filtering above - # to ensure we don't install empty dirs; empty dirs can't be - # uninstalled. - ensure_dir(destdir) - - # We use copyfile (not move, copy, or copy2) to be extra sure - # that we are not moving directories over (copyfile fails for - # directories) as well as to ensure that we are not copying - # over any metadata because we want more control over what - # metadata we actually copy over. - shutil.copyfile(srcfile, destfile) - - # Copy over the metadata for the file, currently this only - # includes the atime and mtime. - st = os.stat(srcfile) - if hasattr(os, "utime"): - os.utime(destfile, (st.st_atime, st.st_mtime)) - - # If our file is executable, then make our destination file - # executable. 
- if os.access(srcfile, os.X_OK): - st = os.stat(srcfile) - permissions = ( - st.st_mode | stat.S_IXUSR | stat.S_IXGRP | stat.S_IXOTH - ) - os.chmod(destfile, permissions) - - changed = False - if fixer: - changed = fixer(destfile) - record_installed(srcfile, destfile, changed) - - clobber(source, lib_dir, True) - - assert info_dir, "%s .dist-info directory not found" % req - - # Get the defined entry points - ep_file = os.path.join(info_dir[0], 'entry_points.txt') - console, gui = get_entrypoints(ep_file) - - def is_entrypoint_wrapper(name): - # EP, EP.exe and EP-script.py are scripts generated for - # entry point EP by setuptools - if name.lower().endswith('.exe'): - matchname = name[:-4] - elif name.lower().endswith('-script.py'): - matchname = name[:-10] - elif name.lower().endswith(".pya"): - matchname = name[:-4] - else: - matchname = name - # Ignore setuptools-generated scripts - return (matchname in console or matchname in gui) - - for datadir in data_dirs: - fixer = None - filter = None - for subdir in os.listdir(os.path.join(wheeldir, datadir)): - fixer = None - if subdir == 'scripts': - fixer = fix_script - filter = is_entrypoint_wrapper - source = os.path.join(wheeldir, datadir, subdir) - dest = scheme[subdir] - clobber(source, dest, False, fixer=fixer, filter=filter) - - maker = ScriptMaker(None, scheme['scripts']) - - # Ensure old scripts are overwritten. - # See https://github.com/pypa/pip/issues/1800 - maker.clobber = True - - # Ensure we don't generate any variants for scripts because this is almost - # never what somebody wants. - # See https://bitbucket.org/pypa/distlib/issue/35/ - maker.variants = set(('', )) - - # This is required because otherwise distlib creates scripts that are not - # executable. - # See https://bitbucket.org/pypa/distlib/issue/32/ - maker.set_mode = True - - # Simplify the script and fix the fact that the default script swallows - # every single stack trace. - # See https://bitbucket.org/pypa/distlib/issue/34/ - # See https://bitbucket.org/pypa/distlib/issue/33/ - def _get_script_text(entry): - if entry.suffix is None: - raise InstallationError( - "Invalid script entry point: %s for req: %s - A callable " - "suffix is required. Cf https://packaging.python.org/en/" - "latest/distributing.html#console-scripts for more " - "information." % (entry, req) - ) - return maker.script_template % { - "module": entry.prefix, - "import_name": entry.suffix.split(".")[0], - "func": entry.suffix, - } - - maker._get_script_text = _get_script_text - maker.script_template = """# -*- coding: utf-8 -*- -import re -import sys - -from %(module)s import %(import_name)s - -if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit(%(func)s()) -""" - - # Special case pip and setuptools to generate versioned wrappers - # - # The issue is that some projects (specifically, pip and setuptools) use - # code in setup.py to create "versioned" entry points - pip2.7 on Python - # 2.7, pip3.3 on Python 3.3, etc. But these entry points are baked into - # the wheel metadata at build time, and so if the wheel is installed with - # a *different* version of Python the entry points will be wrong. The - # correct fix for this is to enhance the metadata to be able to describe - # such versioned entry points, but that won't happen till Metadata 2.0 is - # available. 
- # In the meantime, projects using versioned entry points will either have - # incorrect versioned entry points, or they will not be able to distribute - # "universal" wheels (i.e., they will need a wheel per Python version). - # - # Because setuptools and pip are bundled with _ensurepip and virtualenv, - # we need to use universal wheels. So, as a stopgap until Metadata 2.0, we - # override the versioned entry points in the wheel and generate the - # correct ones. This code is purely a short-term measure until Metadata 2.0 - # is available. - # - # To add the level of hack in this section of code, in order to support - # ensurepip this code will look for an ``ENSUREPIP_OPTIONS`` environment - # variable which will control which version scripts get installed. - # - # ENSUREPIP_OPTIONS=altinstall - # - Only pipX.Y and easy_install-X.Y will be generated and installed - # ENSUREPIP_OPTIONS=install - # - pipX.Y, pipX, easy_install-X.Y will be generated and installed. Note - # that this option is technically if ENSUREPIP_OPTIONS is set and is - # not altinstall - # DEFAULT - # - The default behavior is to install pip, pipX, pipX.Y, easy_install - # and easy_install-X.Y. - pip_script = console.pop('pip', None) - if pip_script: - if "ENSUREPIP_OPTIONS" not in os.environ: - spec = 'pip = ' + pip_script - generated.extend(maker.make(spec)) - - if os.environ.get("ENSUREPIP_OPTIONS", "") != "altinstall": - spec = 'pip%s = %s' % (sys.version[:1], pip_script) - generated.extend(maker.make(spec)) - - spec = 'pip%s = %s' % (sys.version[:3], pip_script) - generated.extend(maker.make(spec)) - # Delete any other versioned pip entry points - pip_ep = [k for k in console if re.match(r'pip(\d(\.\d)?)?$', k)] - for k in pip_ep: - del console[k] - easy_install_script = console.pop('easy_install', None) - if easy_install_script: - if "ENSUREPIP_OPTIONS" not in os.environ: - spec = 'easy_install = ' + easy_install_script - generated.extend(maker.make(spec)) - - spec = 'easy_install-%s = %s' % (sys.version[:3], easy_install_script) - generated.extend(maker.make(spec)) - # Delete any other versioned easy_install entry points - easy_install_ep = [ - k for k in console if re.match(r'easy_install(-\d\.\d)?$', k) - ] - for k in easy_install_ep: - del console[k] - - # Generate the console and GUI entry points specified in the wheel - if len(console) > 0: - generated.extend( - maker.make_multiple(['%s = %s' % kv for kv in console.items()]) - ) - if len(gui) > 0: - generated.extend( - maker.make_multiple( - ['%s = %s' % kv for kv in gui.items()], - {'gui': True} - ) - ) - - # Record pip as the installer - installer = os.path.join(info_dir[0], 'INSTALLER') - temp_installer = os.path.join(info_dir[0], 'INSTALLER.pip') - with open(temp_installer, 'wb') as installer_file: - installer_file.write(b'pip\n') - shutil.move(temp_installer, installer) - generated.append(installer) - - # Record details of all files installed - record = os.path.join(info_dir[0], 'RECORD') - temp_record = os.path.join(info_dir[0], 'RECORD.pip') - with open_for_csv(record, 'r') as record_in: - with open_for_csv(temp_record, 'w+') as record_out: - reader = csv.reader(record_in) - writer = csv.writer(record_out) - for row in reader: - row[0] = installed.pop(row[0], row[0]) - if row[0] in changed: - row[1], row[2] = rehash(row[0]) - writer.writerow(row) - for f in generated: - h, l = rehash(f) - writer.writerow((normpath(f, lib_dir), h, l)) - for f in installed: - writer.writerow((installed[f], '', '')) - shutil.move(temp_record, record) - - -def 
_unique(fn): - @functools.wraps(fn) - def unique(*args, **kw): - seen = set() - for item in fn(*args, **kw): - if item not in seen: - seen.add(item) - yield item - return unique - - -# TODO: this goes somewhere besides the wheel module -@_unique -def uninstallation_paths(dist): - """ - Yield all the uninstallation paths for dist based on RECORD-without-.pyc - - Yield paths to all the files in RECORD. For each .py file in RECORD, add - the .pyc in the same directory. - - UninstallPathSet.add() takes care of the __pycache__ .pyc. - """ - from pip.utils import FakeFile # circular import - r = csv.reader(FakeFile(dist.get_metadata_lines('RECORD'))) - for row in r: - path = os.path.join(dist.location, row[0]) - yield path - if path.endswith('.py'): - dn, fn = os.path.split(path) - base = fn[:-3] - path = os.path.join(dn, base + '.pyc') - yield path - - -def wheel_version(source_dir): - """ - Return the Wheel-Version of an extracted wheel, if possible. - - Otherwise, return False if we couldn't parse / extract it. - """ - try: - dist = [d for d in pkg_resources.find_on_path(None, source_dir)][0] - - wheel_data = dist.get_metadata('WHEEL') - wheel_data = Parser().parsestr(wheel_data) - - version = wheel_data['Wheel-Version'].strip() - version = tuple(map(int, version.split('.'))) - return version - except: - return False - - -def check_compatibility(version, name): - """ - Raises errors or warns if called with an incompatible Wheel-Version. - - Pip should refuse to install a Wheel-Version that's a major series - ahead of what it's compatible with (e.g 2.0 > 1.1); and warn when - installing a version only minor version ahead (e.g 1.2 > 1.1). - - version: a 2-tuple representing a Wheel-Version (Major, Minor) - name: name of wheel or package to raise exception about - - :raises UnsupportedWheel: when an incompatible Wheel-Version is given - """ - if not version: - raise UnsupportedWheel( - "%s is in an unsupported or invalid wheel" % name - ) - if version[0] > VERSION_COMPATIBLE[0]: - raise UnsupportedWheel( - "%s's Wheel-Version (%s) is not compatible with this version " - "of pip" % (name, '.'.join(map(str, version))) - ) - elif version > VERSION_COMPATIBLE: - logger.warning( - 'Installing from a newer Wheel-Version (%s)', - '.'.join(map(str, version)), - ) - - -class Wheel(object): - """A wheel file""" - - # TODO: maybe move the install code into this class - - wheel_file_re = re.compile( - r"""^(?P<namever>(?P<name>.+?)-(?P<ver>\d.*?)) - ((-(?P<build>\d.*?))?-(?P<pyver>.+?)-(?P<abi>.+?)-(?P<plat>.+?) - \.whl|\.dist-info)$""", - re.VERBOSE - ) - - def __init__(self, filename): - """ - :raises InvalidWheelFilename: when the filename is invalid for a wheel - """ - wheel_info = self.wheel_file_re.match(filename) - if not wheel_info: - raise InvalidWheelFilename( - "%s is not a valid wheel filename." 
% filename
-            )
-        self.filename = filename
-        self.name = wheel_info.group('name').replace('_', '-')
-        # we'll assume "_" means "-" due to wheel naming scheme
-        # (https://github.com/pypa/pip/issues/1150)
-        self.version = wheel_info.group('ver').replace('_', '-')
-        self.pyversions = wheel_info.group('pyver').split('.')
-        self.abis = wheel_info.group('abi').split('.')
-        self.plats = wheel_info.group('plat').split('.')
-
-        # All the tag combinations from this file
-        self.file_tags = set(
-            (x, y, z) for x in self.pyversions
-            for y in self.abis for z in self.plats
-        )
-
-    def support_index_min(self, tags=None):
-        """
-        Return the lowest index that one of the wheel's file_tag combinations
-        achieves in the supported_tags list e.g. if there are 8 supported tags,
-        and one of the file tags is first in the list, then return 0. Returns
-        None if the wheel is not supported.
-        """
-        if tags is None:  # for mock
-            tags = pep425tags.supported_tags
-        indexes = [tags.index(c) for c in self.file_tags if c in tags]
-        return min(indexes) if indexes else None
-
-    def supported(self, tags=None):
-        """Is this wheel supported on this system?"""
-        if tags is None:  # for mock
-            tags = pep425tags.supported_tags
-        return bool(set(tags).intersection(self.file_tags))
-
-
-class WheelBuilder(object):
-    """Build wheels from a RequirementSet."""
-
-    def __init__(self, requirement_set, finder, build_options=None,
-                 global_options=None):
-        self.requirement_set = requirement_set
-        self.finder = finder
-        self._cache_root = requirement_set._wheel_cache._cache_dir
-        self._wheel_dir = requirement_set.wheel_download_dir
-        self.build_options = build_options or []
-        self.global_options = global_options or []
-
-    def _build_one(self, req, output_dir, python_tag=None):
-        """Build one wheel.
-
-        :return: The filename of the built wheel, or None if the build failed.
-        """
-        tempd = tempfile.mkdtemp('pip-wheel-')
-        try:
-            if self.__build_one(req, tempd, python_tag=python_tag):
-                try:
-                    wheel_name = os.listdir(tempd)[0]
-                    wheel_path = os.path.join(output_dir, wheel_name)
-                    shutil.move(os.path.join(tempd, wheel_name), wheel_path)
-                    logger.info('Stored in directory: %s', output_dir)
-                    return wheel_path
-                except:
-                    pass
-            # Ignore return, we can't do anything else useful.
-            self._clean_one(req)
-            return None
-        finally:
-            rmtree(tempd)
-
-    def _base_setup_args(self, req):
-        return [
-            sys.executable, "-u", '-c',
-            SETUPTOOLS_SHIM % req.setup_py
-        ] + list(self.global_options)
-
-    def __build_one(self, req, tempd, python_tag=None):
-        base_args = self._base_setup_args(req)
-
-        spin_message = 'Running setup.py bdist_wheel for %s' % (req.name,)
-        with open_spinner(spin_message) as spinner:
-            logger.debug('Destination directory: %s', tempd)
-            wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
-                + self.build_options
-
-            if python_tag is not None:
-                wheel_args += ["--python-tag", python_tag]
-
-            try:
-                call_subprocess(wheel_args, cwd=req.setup_py_dir,
-                                show_stdout=False, spinner=spinner)
-                return True
-            except:
-                spinner.finish("error")
-                logger.error('Failed building wheel for %s', req.name)
-                return False
-
-    def _clean_one(self, req):
-        base_args = self._base_setup_args(req)
-
-        logger.info('Running setup.py clean for %s', req.name)
-        clean_args = base_args + ['clean', '--all']
-        try:
-            call_subprocess(clean_args, cwd=req.source_dir, show_stdout=False)
-            return True
-        except:
-            logger.error('Failed cleaning build dir for %s', req.name)
-            return False
-
-    def build(self, autobuilding=False):
-        """Build wheels.
-class WheelBuilder(object):
-    """Build wheels from a RequirementSet."""
-
-    def __init__(self, requirement_set, finder, build_options=None,
-                 global_options=None):
-        self.requirement_set = requirement_set
-        self.finder = finder
-        self._cache_root = requirement_set._wheel_cache._cache_dir
-        self._wheel_dir = requirement_set.wheel_download_dir
-        self.build_options = build_options or []
-        self.global_options = global_options or []
-
-    def _build_one(self, req, output_dir, python_tag=None):
-        """Build one wheel.
-
-        :return: The filename of the built wheel, or None if the build failed.
-        """
-        tempd = tempfile.mkdtemp('pip-wheel-')
-        try:
-            if self.__build_one(req, tempd, python_tag=python_tag):
-                try:
-                    wheel_name = os.listdir(tempd)[0]
-                    wheel_path = os.path.join(output_dir, wheel_name)
-                    shutil.move(os.path.join(tempd, wheel_name), wheel_path)
-                    logger.info('Stored in directory: %s', output_dir)
-                    return wheel_path
-                except:
-                    pass
-            # Ignore return, we can't do anything else useful.
-            self._clean_one(req)
-            return None
-        finally:
-            rmtree(tempd)
-
-    def _base_setup_args(self, req):
-        return [
-            sys.executable, "-u", '-c',
-            SETUPTOOLS_SHIM % req.setup_py
-        ] + list(self.global_options)
-
-    def __build_one(self, req, tempd, python_tag=None):
-        base_args = self._base_setup_args(req)
-
-        spin_message = 'Running setup.py bdist_wheel for %s' % (req.name,)
-        with open_spinner(spin_message) as spinner:
-            logger.debug('Destination directory: %s', tempd)
-            wheel_args = base_args + ['bdist_wheel', '-d', tempd] \
-                + self.build_options
-
-            if python_tag is not None:
-                wheel_args += ["--python-tag", python_tag]
-
-            try:
-                call_subprocess(wheel_args, cwd=req.setup_py_dir,
-                                show_stdout=False, spinner=spinner)
-                return True
-            except:
-                spinner.finish("error")
-                logger.error('Failed building wheel for %s', req.name)
-                return False
-
-    def _clean_one(self, req):
-        base_args = self._base_setup_args(req)
-
-        logger.info('Running setup.py clean for %s', req.name)
-        clean_args = base_args + ['clean', '--all']
-        try:
-            call_subprocess(clean_args, cwd=req.source_dir, show_stdout=False)
-            return True
-        except:
-            logger.error('Failed cleaning build dir for %s', req.name)
-            return False
-
-    def build(self, autobuilding=False):
-        """Build wheels.
-
-        :param autobuilding: If True, replace the sdist we built from with
-            the newly built wheel, in preparation for installation.
-        :return: True if all the wheels built correctly.
-        """
-        assert self._wheel_dir or (autobuilding and self._cache_root)
-        # unpack sdists and construct the req set
-        self.requirement_set.prepare_files(self.finder)
-
-        reqset = self.requirement_set.requirements.values()
-
-        buildset = []
-        for req in reqset:
-            if req.constraint:
-                continue
-            if req.is_wheel:
-                if not autobuilding:
-                    logger.info(
-                        'Skipping %s, due to already being wheel.', req.name)
-            elif autobuilding and req.editable:
-                pass
-            elif autobuilding and req.link and not req.link.is_artifact:
-                pass
-            elif autobuilding and not req.source_dir:
-                pass
-            else:
-                if autobuilding:
-                    link = req.link
-                    base, ext = link.splitext()
-                    if pip.index.egg_info_matches(base, None, link) is None:
-                        # Doesn't look like a package - don't autobuild a wheel
-                        # because we'll have no way to look up the result sanely
-                        continue
-                    if "binary" not in pip.index.fmt_ctl_formats(
-                            self.finder.format_control,
-                            canonicalize_name(req.name)):
-                        logger.info(
-                            "Skipping bdist_wheel for %s, due to binaries "
-                            "being disabled for it.", req.name)
-                        continue
-                buildset.append(req)
-
-        if not buildset:
-            return True
-
-        # Build the wheels.
-        logger.info(
-            'Building wheels for collected packages: %s',
-            ', '.join([req.name for req in buildset]),
-        )
-        with indent_log():
-            build_success, build_failure = [], []
-            for req in buildset:
-                python_tag = None
-                if autobuilding:
-                    python_tag = pep425tags.implementation_tag
-                    output_dir = _cache_for_link(self._cache_root, req.link)
-                    try:
-                        ensure_dir(output_dir)
-                    except OSError as e:
-                        logger.warning("Building wheel for %s failed: %s",
-                                       req.name, e)
-                        build_failure.append(req)
-                        continue
-                else:
-                    output_dir = self._wheel_dir
-                wheel_file = self._build_one(
-                    req, output_dir,
-                    python_tag=python_tag,
-                )
-                if wheel_file:
-                    build_success.append(req)
-                    if autobuilding:
-                        # XXX: This is mildly duplicative with prepare_files,
-                        # but not close enough to pull out to a single common
-                        # method.
-                        # The code below assumes temporary source dirs -
-                        # prevent it doing bad things.
-                        if req.source_dir and not os.path.exists(os.path.join(
-                                req.source_dir, PIP_DELETE_MARKER_FILENAME)):
-                            raise AssertionError(
-                                "bad source dir - missing marker")
-                        # Delete the source we built the wheel from
-                        req.remove_temporary_source()
-                        # set the build directory again - name is known from
-                        # the work prepare_files did.
-                        req.source_dir = req.build_location(
-                            self.requirement_set.build_dir)
-                        # Update the link for this.
-                        req.link = pip.index.Link(
-                            path_to_url(wheel_file))
-                        assert req.link.is_wheel
-                        # extract the wheel into the dir
-                        unpack_url(
-                            req.link, req.source_dir, None, False,
-                            session=self.requirement_set.session)
-                else:
-                    build_failure.append(req)
-
-        # notify success/failure
-        if build_success:
-            logger.info(
-                'Successfully built %s',
-                ' '.join([req.name for req in build_success]),
-            )
-        if build_failure:
-            logger.info(
-                'Failed to build %s',
-                ' '.join([req.name for req in build_failure]),
-            )
-        # Return True if all builds were successful
-        return len(build_failure) == 0
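Editor's note: a rough usage sketch of the WheelBuilder deleted above (not
part of the diff; requirement_set and finder are pip internals supplied by
its install/wheel commands, so the names here are only illustrative):

    builder = WheelBuilder(requirement_set, finder,
                           build_options=[], global_options=[])
    if builder.build(autobuilding=True):
        # Every sdist was rebuilt as a wheel, stored in the wheel cache
        # via _cache_for_link(), and each req.link was repointed at the
        # freshly built .whl, so installation proceeds from the wheel.
        pass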
diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/DESCRIPTION.rst b/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/DESCRIPTION.rst
deleted file mode 100644
index e118723..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/DESCRIPTION.rst
+++ /dev/null
@@ -1,3 +0,0 @@
-UNKNOWN
-
-
diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/INSTALLER b/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/INSTALLER
deleted file mode 100644
index a1b589e..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/INSTALLER
+++ /dev/null
@@ -1 +0,0 @@
-pip
diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/METADATA b/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/METADATA
deleted file mode 100644
index 7a50487..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/METADATA
+++ /dev/null
@@ -1,13 +0,0 @@
-Metadata-Version: 2.0
-Name: pkg_resources
-Version: 0.0.0
-Summary: UNKNOWN
-Home-page: UNKNOWN
-Author: UNKNOWN
-Author-email: UNKNOWN
-License: UNKNOWN
-Platform: UNKNOWN
-
-UNKNOWN
-
-
diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/RECORD b/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/RECORD
deleted file mode 100644
index f3f8526..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/RECORD
+++ /dev/null
@@ -1,38 +0,0 @@
-pkg_resources/__init__.py,sha256=YQ4_WQnPztMsUy1yuvp7ZRBPK9IhOyhgosLpvkFso1I,103551
-pkg_resources/py31compat.py,sha256=-ysVqoxLetAnL94uM0kHkomKQTC1JZLN2ZUjqUhMeKE,600
-pkg_resources/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-pkg_resources/_vendor/appdirs.py,sha256=mExjXejqnz0UifNzjccc-yJ8JS0TfAW0gXuikOdDQLU,22376
-pkg_resources/_vendor/pyparsing.py,sha256=LTlkTtpPBJ0ypOyi---Es1IvmxfI_vh28VJEh27qY8I,229871
-pkg_resources/_vendor/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098
-pkg_resources/_vendor/packaging/__about__.py,sha256=zkcCPTN_6TcLW0Nrlg0176-R1QQ_WVPTm8sz1R4-HjM,720
-pkg_resources/_vendor/packaging/__init__.py,sha256=_vNac5TrzwsrzbOFIbF-5cHqc_Y2aPT2D7zrIR06BOo,513
-pkg_resources/_vendor/packaging/_compat.py,sha256=Vi_A0rAQeHbU-a9X0tt1yQm9RqkgQbDSxzRw8WlU9kA,860
-pkg_resources/_vendor/packaging/_structures.py,sha256=RImECJ4c_wTlaTYYwZYLHEiebDMaAJmK1oPARhw1T5o,1416
-pkg_resources/_vendor/packaging/markers.py,sha256=uEcBBtGvzqltgnArqb9c4RrcInXezDLos14zbBHhWJo,8248
-pkg_resources/_vendor/packaging/requirements.py,sha256=SikL2UynbsT0qtY9ltqngndha_sfo0w6XGFhAhoSoaQ,4355
-pkg_resources/_vendor/packaging/specifiers.py,sha256=SAMRerzO3fK2IkFZCaZkuwZaL_EGqHNOz4pni4vhnN0,28025
-pkg_resources/_vendor/packaging/utils.py,sha256=3m6WvPm6NNxE8rkTGmn0r75B_GZSGg7ikafxHsBN1WA,421
-pkg_resources/_vendor/packaging/version.py,sha256=OwGnxYfr2ghNzYx59qWIBkrK3SnB6n-Zfd1XaLpnnM0,11556 -pkg_resources/extern/__init__.py,sha256=JUtlHHvlxHSNuB4pWqNjcx7n6kG-fwXg7qmJ2zNJlIY,2487 -pkg_resources-0.0.0.dist-info/DESCRIPTION.rst,sha256=OCTuuN6LcWulhHS3d5rfjdsQtW22n7HENFRh6jC6ego,10 -pkg_resources-0.0.0.dist-info/METADATA,sha256=FOYDX6cmnDUkWo-yhqWQYtjKIMZR2IW2G1GFZhA6gUQ,177 -pkg_resources-0.0.0.dist-info/RECORD,, -pkg_resources-0.0.0.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110 -pkg_resources-0.0.0.dist-info/metadata.json,sha256=jbGQ09fxsrPQMhbeLboAEDZERlOExjHIGENdQdPS6RU,221 -pkg_resources-0.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -pkg_resources/extern/__pycache__/__init__.cpython-36.pyc,, -pkg_resources/_vendor/packaging/__pycache__/version.cpython-36.pyc,, -pkg_resources/_vendor/packaging/__pycache__/markers.cpython-36.pyc,, -pkg_resources/_vendor/packaging/__pycache__/utils.cpython-36.pyc,, -pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-36.pyc,, -pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-36.pyc,, -pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-36.pyc,, -pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-36.pyc,, -pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-36.pyc,, -pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-36.pyc,, -pkg_resources/_vendor/__pycache__/pyparsing.cpython-36.pyc,, -pkg_resources/_vendor/__pycache__/appdirs.cpython-36.pyc,, -pkg_resources/_vendor/__pycache__/__init__.cpython-36.pyc,, -pkg_resources/_vendor/__pycache__/six.cpython-36.pyc,, -pkg_resources/__pycache__/__init__.cpython-36.pyc,, -pkg_resources/__pycache__/py31compat.cpython-36.pyc,, diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/WHEEL b/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/WHEEL deleted file mode 100644 index 7332a41..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.30.0) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/metadata.json b/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/metadata.json deleted file mode 100644 index 1069edb..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources-0.0.0.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"extensions": {"python.details": {"document_names": {"description": "DESCRIPTION.rst"}}}, "generator": "bdist_wheel (0.30.0)", "metadata_version": "2.0", "name": "pkg_resources", "summary": "UNKNOWN", "version": "0.0.0"} \ No newline at end of file diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/__init__.py b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/__init__.py deleted file mode 100644 index 8d95bd2..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/__init__.py +++ /dev/null @@ -1,3125 +0,0 @@ -# coding: utf-8 -""" -Package resource API --------------------- - -A resource is a logical file contained within a package, or a logical -subdirectory thereof. The package resource API expects resource names -to have their path parts separated with ``/``, *not* whatever the local -path separator is. Do not use os.path operations to manipulate resource -names being passed into the API. 
- -The package resource API is designed to work with normal filesystem packages, -.egg files, and unpacked .egg files. It can also work in a limited way with -.zip files and with custom PEP 302 loaders that support the ``get_data()`` -method. -""" - -from __future__ import absolute_import - -import sys -import os -import io -import time -import re -import types -import zipfile -import zipimport -import warnings -import stat -import functools -import pkgutil -import operator -import platform -import collections -import plistlib -import email.parser -import errno -import tempfile -import textwrap -import itertools -import inspect -from pkgutil import get_importer - -try: - import _imp -except ImportError: - # Python 3.2 compatibility - import imp as _imp - -from pkg_resources.extern import six -from pkg_resources.extern.six.moves import urllib, map, filter - -# capture these to bypass sandboxing -from os import utime -try: - from os import mkdir, rename, unlink - WRITE_SUPPORT = True -except ImportError: - # no write support, probably under GAE - WRITE_SUPPORT = False - -from os import open as os_open -from os.path import isdir, split - -try: - import importlib.machinery as importlib_machinery - # access attribute to force import under delayed import mechanisms. - importlib_machinery.__name__ -except ImportError: - importlib_machinery = None - -from . import py31compat -from pkg_resources.extern import appdirs -from pkg_resources.extern import packaging -__import__('pkg_resources.extern.packaging.version') -__import__('pkg_resources.extern.packaging.specifiers') -__import__('pkg_resources.extern.packaging.requirements') -__import__('pkg_resources.extern.packaging.markers') - - -if (3, 0) < sys.version_info < (3, 3): - raise RuntimeError("Python 3.3 or later is required") - -if six.PY2: - # Those builtin exceptions are only defined in Python 3 - PermissionError = None - NotADirectoryError = None - -# declare some globals that will be defined later to -# satisfy the linters. -require = None -working_set = None -add_activation_listener = None -resources_stream = None -cleanup_resources = None -resource_dir = None -resource_stream = None -set_extraction_path = None -resource_isdir = None -resource_string = None -iter_entry_points = None -resource_listdir = None -resource_filename = None -resource_exists = None -_distribution_finders = None -_namespace_handlers = None -_namespace_packages = None - - -class PEP440Warning(RuntimeWarning): - """ - Used when there is an issue with a version or specifier not complying with - PEP 440. - """ - - -def parse_version(v): - try: - return packaging.version.Version(v) - except packaging.version.InvalidVersion: - return packaging.version.LegacyVersion(v) - - -_state_vars = {} - - -def _declare_state(vartype, **kw): - globals().update(kw) - _state_vars.update(dict.fromkeys(kw, vartype)) - - -def __getstate__(): - state = {} - g = globals() - for k, v in _state_vars.items(): - state[k] = g['_sget_' + v](g[k]) - return state - - -def __setstate__(state): - g = globals() - for k, v in state.items(): - g['_sset_' + _state_vars[k]](k, g[k], v) - return state - - -def _sget_dict(val): - return val.copy() - - -def _sset_dict(key, ob, state): - ob.clear() - ob.update(state) - - -def _sget_object(val): - return val.__getstate__() - - -def _sset_object(key, ob, state): - ob.__setstate__(state) - - -_sget_none = _sset_none = lambda *args: None - - -def get_supported_platform(): - """Return this platform's maximum compatible version. 
- - distutils.util.get_platform() normally reports the minimum version - of Mac OS X that would be required to *use* extensions produced by - distutils. But what we want when checking compatibility is to know the - version of Mac OS X that we are *running*. To allow usage of packages that - explicitly require a newer version of Mac OS X, we must also know the - current version of the OS. - - If this condition occurs for any other platform with a version in its - platform strings, this function should be extended accordingly. - """ - plat = get_build_platform() - m = macosVersionString.match(plat) - if m is not None and sys.platform == "darwin": - try: - plat = 'macosx-%s-%s' % ('.'.join(_macosx_vers()[:2]), m.group(3)) - except ValueError: - # not Mac OS X - pass - return plat - - -__all__ = [ - # Basic resource access and distribution/entry point discovery - 'require', 'run_script', 'get_provider', 'get_distribution', - 'load_entry_point', 'get_entry_map', 'get_entry_info', - 'iter_entry_points', - 'resource_string', 'resource_stream', 'resource_filename', - 'resource_listdir', 'resource_exists', 'resource_isdir', - - # Environmental control - 'declare_namespace', 'working_set', 'add_activation_listener', - 'find_distributions', 'set_extraction_path', 'cleanup_resources', - 'get_default_cache', - - # Primary implementation classes - 'Environment', 'WorkingSet', 'ResourceManager', - 'Distribution', 'Requirement', 'EntryPoint', - - # Exceptions - 'ResolutionError', 'VersionConflict', 'DistributionNotFound', - 'UnknownExtra', 'ExtractionError', - - # Warnings - 'PEP440Warning', - - # Parsing functions and string utilities - 'parse_requirements', 'parse_version', 'safe_name', 'safe_version', - 'get_platform', 'compatible_platforms', 'yield_lines', 'split_sections', - 'safe_extra', 'to_filename', 'invalid_marker', 'evaluate_marker', - - # filesystem utilities - 'ensure_directory', 'normalize_path', - - # Distribution "precedence" constants - 'EGG_DIST', 'BINARY_DIST', 'SOURCE_DIST', 'CHECKOUT_DIST', 'DEVELOP_DIST', - - # "Provider" interfaces, implementations, and registration/lookup APIs - 'IMetadataProvider', 'IResourceProvider', 'FileMetadata', - 'PathMetadata', 'EggMetadata', 'EmptyProvider', 'empty_provider', - 'NullProvider', 'EggProvider', 'DefaultProvider', 'ZipProvider', - 'register_finder', 'register_namespace_handler', 'register_loader_type', - 'fixup_namespace_packages', 'get_importer', - - # Deprecated/backward compatibility only - 'run_main', 'AvailableDistributions', -] - - -class ResolutionError(Exception): - """Abstract base for dependency resolution errors""" - - def __repr__(self): - return self.__class__.__name__ + repr(self.args) - - -class VersionConflict(ResolutionError): - """ - An already-installed version conflicts with the requested version. - - Should be initialized with the installed Distribution and the requested - Requirement. - """ - - _template = "{self.dist} is installed but {self.req} is required" - - @property - def dist(self): - return self.args[0] - - @property - def req(self): - return self.args[1] - - def report(self): - return self._template.format(**locals()) - - def with_context(self, required_by): - """ - If required_by is non-empty, return a version of self that is a - ContextualVersionConflict. 
- """ - if not required_by: - return self - args = self.args + (required_by,) - return ContextualVersionConflict(*args) - - -class ContextualVersionConflict(VersionConflict): - """ - A VersionConflict that accepts a third parameter, the set of the - requirements that required the installed Distribution. - """ - - _template = VersionConflict._template + ' by {self.required_by}' - - @property - def required_by(self): - return self.args[2] - - -class DistributionNotFound(ResolutionError): - """A requested distribution was not found""" - - _template = ("The '{self.req}' distribution was not found " - "and is required by {self.requirers_str}") - - @property - def req(self): - return self.args[0] - - @property - def requirers(self): - return self.args[1] - - @property - def requirers_str(self): - if not self.requirers: - return 'the application' - return ', '.join(self.requirers) - - def report(self): - return self._template.format(**locals()) - - def __str__(self): - return self.report() - - -class UnknownExtra(ResolutionError): - """Distribution doesn't have an "extra feature" of the given name""" - - -_provider_factories = {} - -PY_MAJOR = sys.version[:3] -EGG_DIST = 3 -BINARY_DIST = 2 -SOURCE_DIST = 1 -CHECKOUT_DIST = 0 -DEVELOP_DIST = -1 - - -def register_loader_type(loader_type, provider_factory): - """Register `provider_factory` to make providers for `loader_type` - - `loader_type` is the type or class of a PEP 302 ``module.__loader__``, - and `provider_factory` is a function that, passed a *module* object, - returns an ``IResourceProvider`` for that module. - """ - _provider_factories[loader_type] = provider_factory - - -def get_provider(moduleOrReq): - """Return an IResourceProvider for the named module or requirement""" - if isinstance(moduleOrReq, Requirement): - return working_set.find(moduleOrReq) or require(str(moduleOrReq))[0] - try: - module = sys.modules[moduleOrReq] - except KeyError: - __import__(moduleOrReq) - module = sys.modules[moduleOrReq] - loader = getattr(module, '__loader__', None) - return _find_adapter(_provider_factories, loader)(module) - - -def _macosx_vers(_cache=[]): - if not _cache: - version = platform.mac_ver()[0] - # fallback for MacPorts - if version == '': - plist = '/System/Library/CoreServices/SystemVersion.plist' - if os.path.exists(plist): - if hasattr(plistlib, 'readPlist'): - plist_content = plistlib.readPlist(plist) - if 'ProductVersion' in plist_content: - version = plist_content['ProductVersion'] - - _cache.append(version.split('.')) - return _cache[0] - - -def _macosx_arch(machine): - return {'PowerPC': 'ppc', 'Power_Macintosh': 'ppc'}.get(machine, machine) - - -def get_build_platform(): - """Return this platform's string for platform-specific distributions - - XXX Currently this is the same as ``distutils.util.get_platform()``, but it - needs some hacks for Linux and Mac OS X. 
- """ - try: - # Python 2.7 or >=3.2 - from sysconfig import get_platform - except ImportError: - from distutils.util import get_platform - - plat = get_platform() - if sys.platform == "darwin" and not plat.startswith('macosx-'): - try: - version = _macosx_vers() - machine = os.uname()[4].replace(" ", "_") - return "macosx-%d.%d-%s" % ( - int(version[0]), int(version[1]), - _macosx_arch(machine), - ) - except ValueError: - # if someone is running a non-Mac darwin system, this will fall - # through to the default implementation - pass - return plat - - -macosVersionString = re.compile(r"macosx-(\d+)\.(\d+)-(.*)") -darwinVersionString = re.compile(r"darwin-(\d+)\.(\d+)\.(\d+)-(.*)") -# XXX backward compat -get_platform = get_build_platform - - -def compatible_platforms(provided, required): - """Can code for the `provided` platform run on the `required` platform? - - Returns true if either platform is ``None``, or the platforms are equal. - - XXX Needs compatibility checks for Linux and other unixy OSes. - """ - if provided is None or required is None or provided == required: - # easy case - return True - - # Mac OS X special cases - reqMac = macosVersionString.match(required) - if reqMac: - provMac = macosVersionString.match(provided) - - # is this a Mac package? - if not provMac: - # this is backwards compatibility for packages built before - # setuptools 0.6. All packages built after this point will - # use the new macosx designation. - provDarwin = darwinVersionString.match(provided) - if provDarwin: - dversion = int(provDarwin.group(1)) - macosversion = "%s.%s" % (reqMac.group(1), reqMac.group(2)) - if dversion == 7 and macosversion >= "10.3" or \ - dversion == 8 and macosversion >= "10.4": - return True - # egg isn't macosx or legacy darwin - return False - - # are they the same major version and machine type? - if provMac.group(1) != reqMac.group(1) or \ - provMac.group(3) != reqMac.group(3): - return False - - # is the required OS major update >= the provided one? 
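#         Editor's annotation (not in the original pkg_resources): a worked
#         example of the minor-version rule checked just below, with
#         illustrative platform strings --
#           provided 'macosx-10.3-ppc', required 'macosx-10.4-ppc' -> True
#             (an egg built on 10.3 may run on the newer 10.4)
#           provided 'macosx-10.5-ppc', required 'macosx-10.4-ppc' -> False
#             (the egg needs a newer OS than the one requested)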
- if int(provMac.group(2)) > int(reqMac.group(2)): - return False - - return True - - # XXX Linux and other platforms' special cases should go here - return False - - -def run_script(dist_spec, script_name): - """Locate distribution `dist_spec` and run its `script_name` script""" - ns = sys._getframe(1).f_globals - name = ns['__name__'] - ns.clear() - ns['__name__'] = name - require(dist_spec)[0].run_script(script_name, ns) - - -# backward compatibility -run_main = run_script - - -def get_distribution(dist): - """Return a current distribution object for a Requirement or string""" - if isinstance(dist, six.string_types): - dist = Requirement.parse(dist) - if isinstance(dist, Requirement): - dist = get_provider(dist) - if not isinstance(dist, Distribution): - raise TypeError("Expected string, Requirement, or Distribution", dist) - return dist - - -def load_entry_point(dist, group, name): - """Return `name` entry point of `group` for `dist` or raise ImportError""" - return get_distribution(dist).load_entry_point(group, name) - - -def get_entry_map(dist, group=None): - """Return the entry point map for `group`, or the full entry map""" - return get_distribution(dist).get_entry_map(group) - - -def get_entry_info(dist, group, name): - """Return the EntryPoint object for `group`+`name`, or ``None``""" - return get_distribution(dist).get_entry_info(group, name) - - -class IMetadataProvider: - def has_metadata(name): - """Does the package's distribution contain the named metadata?""" - - def get_metadata(name): - """The named metadata resource as a string""" - - def get_metadata_lines(name): - """Yield named metadata resource as list of non-blank non-comment lines - - Leading and trailing whitespace is stripped from each line, and lines - with ``#`` as the first non-blank character are omitted.""" - - def metadata_isdir(name): - """Is the named metadata a directory? (like ``os.path.isdir()``)""" - - def metadata_listdir(name): - """List of metadata names in the directory (like ``os.listdir()``)""" - - def run_script(script_name, namespace): - """Execute the named script in the supplied namespace dictionary""" - - -class IResourceProvider(IMetadataProvider): - """An object that provides access to package resources""" - - def get_resource_filename(manager, resource_name): - """Return a true filesystem path for `resource_name` - - `manager` must be an ``IResourceManager``""" - - def get_resource_stream(manager, resource_name): - """Return a readable file-like object for `resource_name` - - `manager` must be an ``IResourceManager``""" - - def get_resource_string(manager, resource_name): - """Return a string containing the contents of `resource_name` - - `manager` must be an ``IResourceManager``""" - - def has_resource(resource_name): - """Does the package contain the named resource?""" - - def resource_isdir(resource_name): - """Is the named resource a directory? (like ``os.path.isdir()``)""" - - def resource_listdir(resource_name): - """List of resource names in the directory (like ``os.listdir()``)""" - - -class WorkingSet(object): - """A collection of active distributions on sys.path (or a similar list)""" - - def __init__(self, entries=None): - """Create working set from list of path entries (default=sys.path)""" - self.entries = [] - self.entry_keys = {} - self.by_key = {} - self.callbacks = [] - - if entries is None: - entries = sys.path - - for entry in entries: - self.add_entry(entry) - - @classmethod - def _build_master(cls): - """ - Prepare the master working set. 
- """ - ws = cls() - try: - from __main__ import __requires__ - except ImportError: - # The main program does not list any requirements - return ws - - # ensure the requirements are met - try: - ws.require(__requires__) - except VersionConflict: - return cls._build_from_requirements(__requires__) - - return ws - - @classmethod - def _build_from_requirements(cls, req_spec): - """ - Build a working set from a requirement spec. Rewrites sys.path. - """ - # try it without defaults already on sys.path - # by starting with an empty path - ws = cls([]) - reqs = parse_requirements(req_spec) - dists = ws.resolve(reqs, Environment()) - for dist in dists: - ws.add(dist) - - # add any missing entries from sys.path - for entry in sys.path: - if entry not in ws.entries: - ws.add_entry(entry) - - # then copy back to sys.path - sys.path[:] = ws.entries - return ws - - def add_entry(self, entry): - """Add a path item to ``.entries``, finding any distributions on it - - ``find_distributions(entry, True)`` is used to find distributions - corresponding to the path entry, and they are added. `entry` is - always appended to ``.entries``, even if it is already present. - (This is because ``sys.path`` can contain the same value more than - once, and the ``.entries`` of the ``sys.path`` WorkingSet should always - equal ``sys.path``.) - """ - self.entry_keys.setdefault(entry, []) - self.entries.append(entry) - for dist in find_distributions(entry, True): - self.add(dist, entry, False) - - def __contains__(self, dist): - """True if `dist` is the active distribution for its project""" - return self.by_key.get(dist.key) == dist - - def find(self, req): - """Find a distribution matching requirement `req` - - If there is an active distribution for the requested project, this - returns it as long as it meets the version requirement specified by - `req`. But, if there is an active distribution for the project and it - does *not* meet the `req` requirement, ``VersionConflict`` is raised. - If there is no active distribution for the requested project, ``None`` - is returned. - """ - dist = self.by_key.get(req.key) - if dist is not None and dist not in req: - # XXX add more info - raise VersionConflict(dist, req) - return dist - - def iter_entry_points(self, group, name=None): - """Yield entry point objects from `group` matching `name` - - If `name` is None, yields all entry points in `group` from all - distributions in the working set, otherwise only ones matching - both `group` and `name` are yielded (in distribution order). - """ - for dist in self: - entries = dist.get_entry_map(group) - if name is None: - for ep in entries.values(): - yield ep - elif name in entries: - yield entries[name] - - def run_script(self, requires, script_name): - """Locate distribution for `requires` and run `script_name` script""" - ns = sys._getframe(1).f_globals - name = ns['__name__'] - ns.clear() - ns['__name__'] = name - self.require(requires)[0].run_script(script_name, ns) - - def __iter__(self): - """Yield distributions for non-duplicate projects in the working set - - The yield order is the order in which the items' path entries were - added to the working set. 
- """ - seen = {} - for item in self.entries: - if item not in self.entry_keys: - # workaround a cache issue - continue - - for key in self.entry_keys[item]: - if key not in seen: - seen[key] = 1 - yield self.by_key[key] - - def add(self, dist, entry=None, insert=True, replace=False): - """Add `dist` to working set, associated with `entry` - - If `entry` is unspecified, it defaults to the ``.location`` of `dist`. - On exit from this routine, `entry` is added to the end of the working - set's ``.entries`` (if it wasn't already present). - - `dist` is only added to the working set if it's for a project that - doesn't already have a distribution in the set, unless `replace=True`. - If it's added, any callbacks registered with the ``subscribe()`` method - will be called. - """ - if insert: - dist.insert_on(self.entries, entry, replace=replace) - - if entry is None: - entry = dist.location - keys = self.entry_keys.setdefault(entry, []) - keys2 = self.entry_keys.setdefault(dist.location, []) - if not replace and dist.key in self.by_key: - # ignore hidden distros - return - - self.by_key[dist.key] = dist - if dist.key not in keys: - keys.append(dist.key) - if dist.key not in keys2: - keys2.append(dist.key) - self._added_new(dist) - - def resolve(self, requirements, env=None, installer=None, - replace_conflicting=False, extras=None): - """List all distributions needed to (recursively) meet `requirements` - - `requirements` must be a sequence of ``Requirement`` objects. `env`, - if supplied, should be an ``Environment`` instance. If - not supplied, it defaults to all distributions available within any - entry or distribution in the working set. `installer`, if supplied, - will be invoked with each requirement that cannot be met by an - already-installed distribution; it should return a ``Distribution`` or - ``None``. - - Unless `replace_conflicting=True`, raises a VersionConflict exception - if - any requirements are found on the path that have the correct name but - the wrong version. Otherwise, if an `installer` is supplied it will be - invoked to obtain the correct version of the requirement and activate - it. - - `extras` is a list of the extras to be used with these requirements. - This is important because extra requirements may look like `my_req; - extra = "my_extra"`, which would otherwise be interpreted as a purely - optional requirement. Instead, we want to be able to assert that these - requirements are truly required. - """ - - # set up the stack - requirements = list(requirements)[::-1] - # set of processed requirements - processed = {} - # key -> dist - best = {} - to_activate = [] - - req_extras = _ReqExtras() - - # Mapping of requirement to set of distributions that required it; - # useful for reporting info about conflicts. 
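# Editor's annotation (not in the original pkg_resources): resolve() below
# is a breadth-first walk of the requirement graph. For a hypothetical
# parse_requirements('flask') it would pop 'flask', activate the best
# matching dist, push that dist's own requires() (say 'werkzeug' and
# 'jinja2') onto the stack, and record each edge in required_by so that
# DistributionNotFound / VersionConflict can name the requirers.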
-        required_by = collections.defaultdict(set)
-
-        while requirements:
-            # process dependencies breadth-first
-            req = requirements.pop(0)
-            if req in processed:
-                # Ignore cyclic or redundant dependencies
-                continue
-
-            if not req_extras.markers_pass(req, extras):
-                continue
-
-            dist = best.get(req.key)
-            if dist is None:
-                # Find the best distribution and add it to the map
-                dist = self.by_key.get(req.key)
-                if dist is None or (dist not in req and replace_conflicting):
-                    ws = self
-                    if env is None:
-                        if dist is None:
-                            env = Environment(self.entries)
-                        else:
-                            # Use an empty environment and workingset to avoid
-                            # any further conflicts with the conflicting
-                            # distribution
-                            env = Environment([])
-                            ws = WorkingSet([])
-                    dist = best[req.key] = env.best_match(
-                        req, ws, installer,
-                        replace_conflicting=replace_conflicting
-                    )
-                    if dist is None:
-                        requirers = required_by.get(req, None)
-                        raise DistributionNotFound(req, requirers)
-                to_activate.append(dist)
-            if dist not in req:
-                # Oops, the "best" so far conflicts with a dependency
-                dependent_req = required_by[req]
-                raise VersionConflict(dist, req).with_context(dependent_req)
-
-            # push the new requirements onto the stack
-            new_requirements = dist.requires(req.extras)[::-1]
-            requirements.extend(new_requirements)
-
-            # Register the new requirements needed by req
-            for new_requirement in new_requirements:
-                required_by[new_requirement].add(req.project_name)
-                req_extras[new_requirement] = req.extras
-
-            processed[req] = True
-
-        # return list of distros to activate
-        return to_activate
-
-    def find_plugins(
-            self, plugin_env, full_env=None, installer=None, fallback=True):
-        """Find all activatable distributions in `plugin_env`
-
-        Example usage::
-
-            distributions, errors = working_set.find_plugins(
-                Environment(plugin_dirlist)
-            )
-            # add plugins+libs to sys.path
-            map(working_set.add, distributions)
-            # display errors
-            print('Could not load', errors)
-
-        The `plugin_env` should be an ``Environment`` instance that contains
-        only distributions that are in the project's "plugin directory" or
-        directories. The `full_env`, if supplied, should be an ``Environment``
-        that contains all currently-available distributions. If `full_env` is
-        not supplied, one is created automatically from the ``WorkingSet`` this
-        method is called on, which will typically mean that every directory on
-        ``sys.path`` will be scanned for distributions.
-
-        `installer` is a standard installer callback as used by the
-        ``resolve()`` method. The `fallback` flag indicates whether we should
-        attempt to resolve older versions of a plugin if the newest version
-        cannot be resolved.
-
-        This method returns a 2-tuple: (`distributions`, `error_info`), where
-        `distributions` is a list of the distributions found in `plugin_env`
-        that were loadable, along with any other distributions that are needed
-        to resolve their dependencies. `error_info` is a dictionary mapping
-        unloadable plugin distributions to an exception instance describing the
-        error that occurred. Usually this will be a ``DistributionNotFound`` or
-        ``VersionConflict`` instance.
- """ - - plugin_projects = list(plugin_env) - # scan project names in alphabetic order - plugin_projects.sort() - - error_info = {} - distributions = {} - - if full_env is None: - env = Environment(self.entries) - env += plugin_env - else: - env = full_env + plugin_env - - shadow_set = self.__class__([]) - # put all our entries in shadow_set - list(map(shadow_set.add, self)) - - for project_name in plugin_projects: - - for dist in plugin_env[project_name]: - - req = [dist.as_requirement()] - - try: - resolvees = shadow_set.resolve(req, env, installer) - - except ResolutionError as v: - # save error info - error_info[dist] = v - if fallback: - # try the next older version of project - continue - else: - # give up on this project, keep going - break - - else: - list(map(shadow_set.add, resolvees)) - distributions.update(dict.fromkeys(resolvees)) - - # success, no need to try any more versions of this project - break - - distributions = list(distributions) - distributions.sort() - - return distributions, error_info - - def require(self, *requirements): - """Ensure that distributions matching `requirements` are activated - - `requirements` must be a string or a (possibly-nested) sequence - thereof, specifying the distributions and versions required. The - return value is a sequence of the distributions that needed to be - activated to fulfill the requirements; all relevant distributions are - included, even if they were already activated in this working set. - """ - needed = self.resolve(parse_requirements(requirements)) - - for dist in needed: - self.add(dist) - - return needed - - def subscribe(self, callback, existing=True): - """Invoke `callback` for all distributions - - If `existing=True` (default), - call on all existing ones, as well. - """ - if callback in self.callbacks: - return - self.callbacks.append(callback) - if not existing: - return - for dist in self: - callback(dist) - - def _added_new(self, dist): - for callback in self.callbacks: - callback(dist) - - def __getstate__(self): - return ( - self.entries[:], self.entry_keys.copy(), self.by_key.copy(), - self.callbacks[:] - ) - - def __setstate__(self, e_k_b_c): - entries, keys, by_key, callbacks = e_k_b_c - self.entries = entries[:] - self.entry_keys = keys.copy() - self.by_key = by_key.copy() - self.callbacks = callbacks[:] - - -class _ReqExtras(dict): - """ - Map each requirement to the extras that demanded it. - """ - - def markers_pass(self, req, extras=None): - """ - Evaluate markers for req against each extra that - demanded it. - - Return False if the req has a marker and fails - evaluation. Otherwise, return True. - """ - extra_evals = ( - req.marker.evaluate({'extra': extra}) - for extra in self.get(req, ()) + (extras or (None,)) - ) - return not req.marker or any(extra_evals) - - -class Environment(object): - """Searchable snapshot of distributions on a search path""" - - def __init__( - self, search_path=None, platform=get_supported_platform(), - python=PY_MAJOR): - """Snapshot distributions available on a search path - - Any distributions found on `search_path` are added to the environment. - `search_path` should be a sequence of ``sys.path`` items. If not - supplied, ``sys.path`` is used. - - `platform` is an optional string specifying the name of the platform - that platform-specific distributions must be compatible with. If - unspecified, it defaults to the current platform. `python` is an - optional string naming the desired version of Python (e.g. ``'3.3'``); - it defaults to the current version. 
- - You may explicitly set `platform` (and/or `python`) to ``None`` if you - wish to map *all* distributions, not just those compatible with the - running platform or Python version. - """ - self._distmap = {} - self.platform = platform - self.python = python - self.scan(search_path) - - def can_add(self, dist): - """Is distribution `dist` acceptable for this environment? - - The distribution must match the platform and python version - requirements specified when this environment was created, or False - is returned. - """ - py_compat = ( - self.python is None - or dist.py_version is None - or dist.py_version == self.python - ) - return py_compat and compatible_platforms(dist.platform, self.platform) - - def remove(self, dist): - """Remove `dist` from the environment""" - self._distmap[dist.key].remove(dist) - - def scan(self, search_path=None): - """Scan `search_path` for distributions usable in this environment - - Any distributions found are added to the environment. - `search_path` should be a sequence of ``sys.path`` items. If not - supplied, ``sys.path`` is used. Only distributions conforming to - the platform/python version defined at initialization are added. - """ - if search_path is None: - search_path = sys.path - - for item in search_path: - for dist in find_distributions(item): - self.add(dist) - - def __getitem__(self, project_name): - """Return a newest-to-oldest list of distributions for `project_name` - - Uses case-insensitive `project_name` comparison, assuming all the - project's distributions use their project's name converted to all - lowercase as their key. - - """ - distribution_key = project_name.lower() - return self._distmap.get(distribution_key, []) - - def add(self, dist): - """Add `dist` if we ``can_add()`` it and it has not already been added - """ - if self.can_add(dist) and dist.has_version(): - dists = self._distmap.setdefault(dist.key, []) - if dist not in dists: - dists.append(dist) - dists.sort(key=operator.attrgetter('hashcmp'), reverse=True) - - def best_match( - self, req, working_set, installer=None, replace_conflicting=False): - """Find distribution best matching `req` and usable on `working_set` - - This calls the ``find(req)`` method of the `working_set` to see if a - suitable distribution is already active. (This may raise - ``VersionConflict`` if an unsuitable version of the project is already - active in the specified `working_set`.) If a suitable distribution - isn't active, this method returns the newest distribution in the - environment that meets the ``Requirement`` in `req`. If no suitable - distribution is found, and `installer` is supplied, then the result of - calling the environment's ``obtain(req, installer)`` method will be - returned. - """ - try: - dist = working_set.find(req) - except VersionConflict: - if not replace_conflicting: - raise - dist = None - if dist is not None: - return dist - for dist in self[req.key]: - if dist in req: - return dist - # try to download/install - return self.obtain(req, installer) - - def obtain(self, requirement, installer=None): - """Obtain a distribution matching `requirement` (e.g. via download) - - Obtain a distro that matches requirement (e.g. via download). In the - base ``Environment`` class, this routine just returns - ``installer(requirement)``, unless `installer` is None, in which case - None is returned instead. 
This method is a hook that allows subclasses - to attempt other ways of obtaining a distribution before falling back - to the `installer` argument.""" - if installer is not None: - return installer(requirement) - - def __iter__(self): - """Yield the unique project names of the available distributions""" - for key in self._distmap.keys(): - if self[key]: - yield key - - def __iadd__(self, other): - """In-place addition of a distribution or environment""" - if isinstance(other, Distribution): - self.add(other) - elif isinstance(other, Environment): - for project in other: - for dist in other[project]: - self.add(dist) - else: - raise TypeError("Can't add %r to environment" % (other,)) - return self - - def __add__(self, other): - """Add an environment or distribution to an environment""" - new = self.__class__([], platform=None, python=None) - for env in self, other: - new += env - return new - - -# XXX backward compatibility -AvailableDistributions = Environment - - -class ExtractionError(RuntimeError): - """An error occurred extracting a resource - - The following attributes are available from instances of this exception: - - manager - The resource manager that raised this exception - - cache_path - The base directory for resource extraction - - original_error - The exception instance that caused extraction to fail - """ - - -class ResourceManager: - """Manage resource extraction and packages""" - extraction_path = None - - def __init__(self): - self.cached_files = {} - - def resource_exists(self, package_or_requirement, resource_name): - """Does the named resource exist?""" - return get_provider(package_or_requirement).has_resource(resource_name) - - def resource_isdir(self, package_or_requirement, resource_name): - """Is the named resource an existing directory?""" - return get_provider(package_or_requirement).resource_isdir( - resource_name - ) - - def resource_filename(self, package_or_requirement, resource_name): - """Return a true filesystem path for specified resource""" - return get_provider(package_or_requirement).get_resource_filename( - self, resource_name - ) - - def resource_stream(self, package_or_requirement, resource_name): - """Return a readable file-like object for specified resource""" - return get_provider(package_or_requirement).get_resource_stream( - self, resource_name - ) - - def resource_string(self, package_or_requirement, resource_name): - """Return specified resource as a string""" - return get_provider(package_or_requirement).get_resource_string( - self, resource_name - ) - - def resource_listdir(self, package_or_requirement, resource_name): - """List the contents of the named resource directory""" - return get_provider(package_or_requirement).resource_listdir( - resource_name - ) - - def extraction_error(self): - """Give an error message for problems extracting file(s)""" - - old_exc = sys.exc_info()[1] - cache_path = self.extraction_path or get_default_cache() - - tmpl = textwrap.dedent(""" - Can't extract file(s) to egg cache - - The following error occurred while trying to extract file(s) - to the Python egg cache: - - {old_exc} - - The Python egg cache directory is currently set to: - - {cache_path} - - Perhaps your account does not have write access to this directory? - You can change the cache directory by setting the PYTHON_EGG_CACHE - environment variable to point to an accessible directory. 
- """).lstrip() - err = ExtractionError(tmpl.format(**locals())) - err.manager = self - err.cache_path = cache_path - err.original_error = old_exc - raise err - - def get_cache_path(self, archive_name, names=()): - """Return absolute location in cache for `archive_name` and `names` - - The parent directory of the resulting path will be created if it does - not already exist. `archive_name` should be the base filename of the - enclosing egg (which may not be the name of the enclosing zipfile!), - including its ".egg" extension. `names`, if provided, should be a - sequence of path name parts "under" the egg's extraction location. - - This method should only be called by resource providers that need to - obtain an extraction location, and only for names they intend to - extract, as it tracks the generated names for possible cleanup later. - """ - extract_path = self.extraction_path or get_default_cache() - target_path = os.path.join(extract_path, archive_name + '-tmp', *names) - try: - _bypass_ensure_directory(target_path) - except Exception: - self.extraction_error() - - self._warn_unsafe_extraction_path(extract_path) - - self.cached_files[target_path] = 1 - return target_path - - @staticmethod - def _warn_unsafe_extraction_path(path): - """ - If the default extraction path is overridden and set to an insecure - location, such as /tmp, it opens up an opportunity for an attacker to - replace an extracted file with an unauthorized payload. Warn the user - if a known insecure location is used. - - See Distribute #375 for more details. - """ - if os.name == 'nt' and not path.startswith(os.environ['windir']): - # On Windows, permissions are generally restrictive by default - # and temp directories are not writable by other users, so - # bypass the warning. - return - mode = os.stat(path).st_mode - if mode & stat.S_IWOTH or mode & stat.S_IWGRP: - msg = ( - "%s is writable by group/others and vulnerable to attack " - "when " - "used with get_resource_filename. Consider a more secure " - "location (set with .set_extraction_path or the " - "PYTHON_EGG_CACHE environment variable)." % path - ) - warnings.warn(msg, UserWarning) - - def postprocess(self, tempname, filename): - """Perform any platform-specific postprocessing of `tempname` - - This is where Mac header rewrites should be done; other platforms don't - have anything special they should do. - - Resource providers should call this method ONLY after successfully - extracting a compressed resource. They must NOT call it on resources - that are already in the filesystem. - - `tempname` is the current (temporary) name of the file, and `filename` - is the name it will be renamed to by the caller after this routine - returns. - """ - - if os.name == 'posix': - # Make the resource executable - mode = ((os.stat(tempname).st_mode) | 0o555) & 0o7777 - os.chmod(tempname, mode) - - def set_extraction_path(self, path): - """Set the base path where resources will be extracted to, if needed. - - If you do not call this routine before any extractions take place, the - path defaults to the return value of ``get_default_cache()``. (Which - is based on the ``PYTHON_EGG_CACHE`` environment variable, with various - platform-specific fallbacks. See that routine's documentation for more - details.) - - Resources are extracted to subdirectories of this path based upon - information given by the ``IResourceProvider``. You may set this to a - temporary directory, but then you must call ``cleanup_resources()`` to - delete the extracted files when done. 
There is no guarantee that - ``cleanup_resources()`` will be able to remove all extracted files. - - (Note: you may not change the extraction path for a given resource - manager once resources have been extracted, unless you first call - ``cleanup_resources()``.) - """ - if self.cached_files: - raise ValueError( - "Can't change extraction path, files already extracted" - ) - - self.extraction_path = path - - def cleanup_resources(self, force=False): - """ - Delete all extracted resource files and directories, returning a list - of the file and directory names that could not be successfully removed. - This function does not have any concurrency protection, so it should - generally only be called when the extraction path is a temporary - directory exclusive to a single process. This method is not - automatically called; you must call it explicitly or register it as an - ``atexit`` function if you wish to ensure cleanup of a temporary - directory used for extractions. - """ - # XXX - - -def get_default_cache(): - """ - Return the ``PYTHON_EGG_CACHE`` environment variable - or a platform-relevant user cache dir for an app - named "Python-Eggs". - """ - return ( - os.environ.get('PYTHON_EGG_CACHE') - or appdirs.user_cache_dir(appname='Python-Eggs') - ) - - -def safe_name(name): - """Convert an arbitrary string to a standard distribution name - - Any runs of non-alphanumeric/. characters are replaced with a single '-'. - """ - return re.sub('[^A-Za-z0-9.]+', '-', name) - - -def safe_version(version): - """ - Convert an arbitrary string to a standard version string - """ - try: - # normalize the version - return str(packaging.version.Version(version)) - except packaging.version.InvalidVersion: - version = version.replace(' ', '.') - return re.sub('[^A-Za-z0-9.]+', '-', version) - - -def safe_extra(extra): - """Convert an arbitrary string to a standard 'extra' name - - Any runs of non-alphanumeric characters are replaced with a single '_', - and the result is always lowercased. - """ - return re.sub('[^A-Za-z0-9.-]+', '_', extra).lower() - - -def to_filename(name): - """Convert a project or version name to its filename-escaped form - - Any '-' characters are currently replaced with '_'. - """ - return name.replace('-', '_') - - -def invalid_marker(text): - """ - Validate text as a PEP 508 environment marker; return an exception - if invalid or False otherwise. - """ - try: - evaluate_marker(text) - except SyntaxError as e: - e.filename = None - e.lineno = None - return e - return False - - -def evaluate_marker(text, extra=None): - """ - Evaluate a PEP 508 environment marker. - Return a boolean indicating the marker result in this environment. - Raise SyntaxError if marker is invalid. - - This implementation uses the 'pyparsing' module. 
- """ - try: - marker = packaging.markers.Marker(text) - return marker.evaluate() - except packaging.markers.InvalidMarker as e: - raise SyntaxError(e) - - -class NullProvider: - """Try to implement resources and metadata for arbitrary PEP 302 loaders""" - - egg_name = None - egg_info = None - loader = None - - def __init__(self, module): - self.loader = getattr(module, '__loader__', None) - self.module_path = os.path.dirname(getattr(module, '__file__', '')) - - def get_resource_filename(self, manager, resource_name): - return self._fn(self.module_path, resource_name) - - def get_resource_stream(self, manager, resource_name): - return io.BytesIO(self.get_resource_string(manager, resource_name)) - - def get_resource_string(self, manager, resource_name): - return self._get(self._fn(self.module_path, resource_name)) - - def has_resource(self, resource_name): - return self._has(self._fn(self.module_path, resource_name)) - - def has_metadata(self, name): - return self.egg_info and self._has(self._fn(self.egg_info, name)) - - def get_metadata(self, name): - if not self.egg_info: - return "" - value = self._get(self._fn(self.egg_info, name)) - return value.decode('utf-8') if six.PY3 else value - - def get_metadata_lines(self, name): - return yield_lines(self.get_metadata(name)) - - def resource_isdir(self, resource_name): - return self._isdir(self._fn(self.module_path, resource_name)) - - def metadata_isdir(self, name): - return self.egg_info and self._isdir(self._fn(self.egg_info, name)) - - def resource_listdir(self, resource_name): - return self._listdir(self._fn(self.module_path, resource_name)) - - def metadata_listdir(self, name): - if self.egg_info: - return self._listdir(self._fn(self.egg_info, name)) - return [] - - def run_script(self, script_name, namespace): - script = 'scripts/' + script_name - if not self.has_metadata(script): - raise ResolutionError( - "Script {script!r} not found in metadata at {self.egg_info!r}" - .format(**locals()), - ) - script_text = self.get_metadata(script).replace('\r\n', '\n') - script_text = script_text.replace('\r', '\n') - script_filename = self._fn(self.egg_info, script) - namespace['__file__'] = script_filename - if os.path.exists(script_filename): - source = open(script_filename).read() - code = compile(source, script_filename, 'exec') - exec(code, namespace, namespace) - else: - from linecache import cache - cache[script_filename] = ( - len(script_text), 0, script_text.split('\n'), script_filename - ) - script_code = compile(script_text, script_filename, 'exec') - exec(script_code, namespace, namespace) - - def _has(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _isdir(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _listdir(self, path): - raise NotImplementedError( - "Can't perform this operation for unregistered loader type" - ) - - def _fn(self, base, resource_name): - if resource_name: - return os.path.join(base, *resource_name.split('/')) - return base - - def _get(self, path): - if hasattr(self.loader, 'get_data'): - return self.loader.get_data(path) - raise NotImplementedError( - "Can't perform this operation for loaders without 'get_data()'" - ) - - -register_loader_type(object, NullProvider) - - -class EggProvider(NullProvider): - """Provider based on a virtual filesystem""" - - def __init__(self, module): - NullProvider.__init__(self, module) - self._setup_prefix() - - def _setup_prefix(self): - # we 
assume here that our metadata may be nested inside a "basket" - # of multiple eggs; that's why we use module_path instead of .archive - path = self.module_path - old = None - while path != old: - if _is_egg_path(path): - self.egg_name = os.path.basename(path) - self.egg_info = os.path.join(path, 'EGG-INFO') - self.egg_root = path - break - old = path - path, base = os.path.split(path) - - -class DefaultProvider(EggProvider): - """Provides access to package resources in the filesystem""" - - def _has(self, path): - return os.path.exists(path) - - def _isdir(self, path): - return os.path.isdir(path) - - def _listdir(self, path): - return os.listdir(path) - - def get_resource_stream(self, manager, resource_name): - return open(self._fn(self.module_path, resource_name), 'rb') - - def _get(self, path): - with open(path, 'rb') as stream: - return stream.read() - - @classmethod - def _register(cls): - loader_cls = getattr( - importlib_machinery, - 'SourceFileLoader', - type(None), - ) - register_loader_type(loader_cls, cls) - - -DefaultProvider._register() - - -class EmptyProvider(NullProvider): - """Provider that returns nothing for all requests""" - - module_path = None - - _isdir = _has = lambda self, path: False - - def _get(self, path): - return '' - - def _listdir(self, path): - return [] - - def __init__(self): - pass - - -empty_provider = EmptyProvider() - - -class ZipManifests(dict): - """ - zip manifest builder - """ - - @classmethod - def build(cls, path): - """ - Build a dictionary similar to the zipimport directory - caches, except instead of tuples, store ZipInfo objects. - - Use a platform-specific path separator (os.sep) for the path keys - for compatibility with pypy on Windows. - """ - with zipfile.ZipFile(path) as zfile: - items = ( - ( - name.replace('/', os.sep), - zfile.getinfo(name), - ) - for name in zfile.namelist() - ) - return dict(items) - - load = build - - -class MemoizedZipManifests(ZipManifests): - """ - Memoized zipfile manifests. - """ - manifest_mod = collections.namedtuple('manifest_mod', 'manifest mtime') - - def load(self, path): - """ - Load a manifest at path or return a suitable manifest already loaded. - """ - path = os.path.normpath(path) - mtime = os.stat(path).st_mtime - - if path not in self or self[path].mtime != mtime: - manifest = self.build(path) - self[path] = self.manifest_mod(manifest, mtime) - - return self[path].manifest - - -class ZipProvider(EggProvider): - """Resource support for zips and eggs""" - - eagers = None - _zip_manifests = MemoizedZipManifests() - - def __init__(self, module): - EggProvider.__init__(self, module) - self.zip_pre = self.loader.archive + os.sep - - def _zipinfo_name(self, fspath): - # Convert a virtual filename (full path to file) into a zipfile subpath - # usable with the zipimport directory cache for our target archive - fspath = fspath.rstrip(os.sep) - if fspath == self.loader.archive: - return '' - if fspath.startswith(self.zip_pre): - return fspath[len(self.zip_pre):] - raise AssertionError( - "%s is not a subpath of %s" % (fspath, self.zip_pre) - ) - - def _parts(self, zip_path): - # Convert a zipfile subpath into an egg-relative path part list. 
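# Editor's annotation (not in the original pkg_resources): for a
# hypothetical loader.archive == '/srv/basket.egg' (zip_pre is the archive
# plus os.sep) and egg_root == '/srv/basket.egg', _parts('pkg/data.txt')
# rebuilds the pseudo-path '/srv/basket.egg/pkg/data.txt' and returns
# ['pkg', 'data.txt'].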
- # pseudo-fs path - fspath = self.zip_pre + zip_path - if fspath.startswith(self.egg_root + os.sep): - return fspath[len(self.egg_root) + 1:].split(os.sep) - raise AssertionError( - "%s is not a subpath of %s" % (fspath, self.egg_root) - ) - - @property - def zipinfo(self): - return self._zip_manifests.load(self.loader.archive) - - def get_resource_filename(self, manager, resource_name): - if not self.egg_name: - raise NotImplementedError( - "resource_filename() only supported for .egg, not .zip" - ) - # no need to lock for extraction, since we use temp names - zip_path = self._resource_to_zip(resource_name) - eagers = self._get_eager_resources() - if '/'.join(self._parts(zip_path)) in eagers: - for name in eagers: - self._extract_resource(manager, self._eager_to_zip(name)) - return self._extract_resource(manager, zip_path) - - @staticmethod - def _get_date_and_size(zip_stat): - size = zip_stat.file_size - # ymdhms+wday, yday, dst - date_time = zip_stat.date_time + (0, 0, -1) - # 1980 offset already done - timestamp = time.mktime(date_time) - return timestamp, size - - def _extract_resource(self, manager, zip_path): - - if zip_path in self._index(): - for name in self._index()[zip_path]: - last = self._extract_resource( - manager, os.path.join(zip_path, name) - ) - # return the extracted directory name - return os.path.dirname(last) - - timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) - - if not WRITE_SUPPORT: - raise IOError('"os.rename" and "os.unlink" are not supported ' - 'on this platform') - try: - - real_path = manager.get_cache_path( - self.egg_name, self._parts(zip_path) - ) - - if self._is_current(real_path, zip_path): - return real_path - - outf, tmpnam = _mkstemp( - ".$extract", - dir=os.path.dirname(real_path), - ) - os.write(outf, self.loader.get_data(zip_path)) - os.close(outf) - utime(tmpnam, (timestamp, timestamp)) - manager.postprocess(tmpnam, real_path) - - try: - rename(tmpnam, real_path) - - except os.error: - if os.path.isfile(real_path): - if self._is_current(real_path, zip_path): - # the file became current since it was checked above, - # so proceed. 
- return real_path - # Windows, del old file and retry - elif os.name == 'nt': - unlink(real_path) - rename(tmpnam, real_path) - return real_path - raise - - except os.error: - # report a user-friendly error - manager.extraction_error() - - return real_path - - def _is_current(self, file_path, zip_path): - """ - Return True if the file_path is current for this zip_path - """ - timestamp, size = self._get_date_and_size(self.zipinfo[zip_path]) - if not os.path.isfile(file_path): - return False - stat = os.stat(file_path) - if stat.st_size != size or stat.st_mtime != timestamp: - return False - # check that the contents match - zip_contents = self.loader.get_data(zip_path) - with open(file_path, 'rb') as f: - file_contents = f.read() - return zip_contents == file_contents - - def _get_eager_resources(self): - if self.eagers is None: - eagers = [] - for name in ('native_libs.txt', 'eager_resources.txt'): - if self.has_metadata(name): - eagers.extend(self.get_metadata_lines(name)) - self.eagers = eagers - return self.eagers - - def _index(self): - try: - return self._dirindex - except AttributeError: - ind = {} - for path in self.zipinfo: - parts = path.split(os.sep) - while parts: - parent = os.sep.join(parts[:-1]) - if parent in ind: - ind[parent].append(parts[-1]) - break - else: - ind[parent] = [parts.pop()] - self._dirindex = ind - return ind - - def _has(self, fspath): - zip_path = self._zipinfo_name(fspath) - return zip_path in self.zipinfo or zip_path in self._index() - - def _isdir(self, fspath): - return self._zipinfo_name(fspath) in self._index() - - def _listdir(self, fspath): - return list(self._index().get(self._zipinfo_name(fspath), ())) - - def _eager_to_zip(self, resource_name): - return self._zipinfo_name(self._fn(self.egg_root, resource_name)) - - def _resource_to_zip(self, resource_name): - return self._zipinfo_name(self._fn(self.module_path, resource_name)) - - -register_loader_type(zipimport.zipimporter, ZipProvider) - - -class FileMetadata(EmptyProvider): - """Metadata handler for standalone PKG-INFO files - - Usage:: - - metadata = FileMetadata("/path/to/PKG-INFO") - - This provider rejects all data and metadata requests except for PKG-INFO, - which is treated as existing, and will be the contents of the file at - the provided location. 
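As a rough sketch, such a provider can back a ``Distribution`` directly
(the path and project name below are invented for illustration)::

    from pkg_resources import Distribution, FileMetadata

    pkg_info = '/tmp/example/PKG-INFO'  # hypothetical location
    metadata = FileMetadata(pkg_info)
    dist = Distribution.from_location('/tmp/example', 'example', metadata)
    # only PKG-INFO is served; every other request is rejected
    print(dist.has_metadata('PKG-INFO'))  # True only if the file exists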
- """ - - def __init__(self, path): - self.path = path - - def has_metadata(self, name): - return name == 'PKG-INFO' and os.path.isfile(self.path) - - def get_metadata(self, name): - if name != 'PKG-INFO': - raise KeyError("No metadata except PKG-INFO is available") - - with io.open(self.path, encoding='utf-8', errors="replace") as f: - metadata = f.read() - self._warn_on_replacement(metadata) - return metadata - - def _warn_on_replacement(self, metadata): - # Python 2.7 compat for: replacement_char = '�' - replacement_char = b'\xef\xbf\xbd'.decode('utf-8') - if replacement_char in metadata: - tmpl = "{self.path} could not be properly decoded in UTF-8" - msg = tmpl.format(**locals()) - warnings.warn(msg) - - def get_metadata_lines(self, name): - return yield_lines(self.get_metadata(name)) - - -class PathMetadata(DefaultProvider): - """Metadata provider for egg directories - - Usage:: - - # Development eggs: - - egg_info = "/path/to/PackageName.egg-info" - base_dir = os.path.dirname(egg_info) - metadata = PathMetadata(base_dir, egg_info) - dist_name = os.path.splitext(os.path.basename(egg_info))[0] - dist = Distribution(basedir, project_name=dist_name, metadata=metadata) - - # Unpacked egg directories: - - egg_path = "/path/to/PackageName-ver-pyver-etc.egg" - metadata = PathMetadata(egg_path, os.path.join(egg_path,'EGG-INFO')) - dist = Distribution.from_filename(egg_path, metadata=metadata) - """ - - def __init__(self, path, egg_info): - self.module_path = path - self.egg_info = egg_info - - -class EggMetadata(ZipProvider): - """Metadata provider for .egg files""" - - def __init__(self, importer): - """Create a metadata provider from a zipimporter""" - - self.zip_pre = importer.archive + os.sep - self.loader = importer - if importer.prefix: - self.module_path = os.path.join(importer.archive, importer.prefix) - else: - self.module_path = importer.archive - self._setup_prefix() - - -_declare_state('dict', _distribution_finders={}) - - -def register_finder(importer_type, distribution_finder): - """Register `distribution_finder` to find distributions in sys.path items - - `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item - handler), and `distribution_finder` is a callable that, passed a path - item and the importer instance, yields ``Distribution`` instances found on - that path item. See ``pkg_resources.find_on_path`` for an example.""" - _distribution_finders[importer_type] = distribution_finder - - -def find_distributions(path_item, only=False): - """Yield distributions accessible via `path_item`""" - importer = get_importer(path_item) - finder = _find_adapter(_distribution_finders, importer) - return finder(importer, path_item, only) - - -def find_eggs_in_zip(importer, path_item, only=False): - """ - Find eggs in zip files; possibly multiple nested eggs. 
- """ - if importer.archive.endswith('.whl'): - # wheels are not supported with this finder - # they don't have PKG-INFO metadata, and won't ever contain eggs - return - metadata = EggMetadata(importer) - if metadata.has_metadata('PKG-INFO'): - yield Distribution.from_filename(path_item, metadata=metadata) - if only: - # don't yield nested distros - return - for subitem in metadata.resource_listdir('/'): - if _is_egg_path(subitem): - subpath = os.path.join(path_item, subitem) - dists = find_eggs_in_zip(zipimport.zipimporter(subpath), subpath) - for dist in dists: - yield dist - elif subitem.lower().endswith('.dist-info'): - subpath = os.path.join(path_item, subitem) - submeta = EggMetadata(zipimport.zipimporter(subpath)) - submeta.egg_info = subpath - yield Distribution.from_location(path_item, subitem, submeta) - - -register_finder(zipimport.zipimporter, find_eggs_in_zip) - - -def find_nothing(importer, path_item, only=False): - return () - - -register_finder(object, find_nothing) - - -def _by_version_descending(names): - """ - Given a list of filenames, return them in descending order - by version number. - - >>> names = 'bar', 'foo', 'Python-2.7.10.egg', 'Python-2.7.2.egg' - >>> _by_version_descending(names) - ['Python-2.7.10.egg', 'Python-2.7.2.egg', 'foo', 'bar'] - >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.egg' - >>> _by_version_descending(names) - ['Setuptools-1.2.3.egg', 'Setuptools-1.2.3b1.egg'] - >>> names = 'Setuptools-1.2.3b1.egg', 'Setuptools-1.2.3.post1.egg' - >>> _by_version_descending(names) - ['Setuptools-1.2.3.post1.egg', 'Setuptools-1.2.3b1.egg'] - """ - def _by_version(name): - """ - Parse each component of the filename - """ - name, ext = os.path.splitext(name) - parts = itertools.chain(name.split('-'), [ext]) - return [packaging.version.parse(part) for part in parts] - - return sorted(names, key=_by_version, reverse=True) - - -def find_on_path(importer, path_item, only=False): - """Yield distributions accessible on a sys.path directory""" - path_item = _normalize_cached(path_item) - - if _is_unpacked_egg(path_item): - yield Distribution.from_filename( - path_item, metadata=PathMetadata( - path_item, os.path.join(path_item, 'EGG-INFO') - ) - ) - return - - entries = safe_listdir(path_item) - - # for performance, before sorting by version, - # screen entries for only those that will yield - # distributions - filtered = ( - entry - for entry in entries - if dist_factory(path_item, entry, only) - ) - - # scan for .egg and .egg-info in directory - path_item_entries = _by_version_descending(filtered) - for entry in path_item_entries: - fullpath = os.path.join(path_item, entry) - factory = dist_factory(path_item, entry, only) - for dist in factory(fullpath): - yield dist - - -def dist_factory(path_item, entry, only): - """ - Return a dist_factory for a path_item and entry - """ - lower = entry.lower() - is_meta = any(map(lower.endswith, ('.egg-info', '.dist-info'))) - return ( - distributions_from_metadata - if is_meta else - find_distributions - if not only and _is_egg_path(entry) else - resolve_egg_link - if not only and lower.endswith('.egg-link') else - NoDists() - ) - - -class NoDists: - """ - >>> bool(NoDists()) - False - - >>> list(NoDists()('anything')) - [] - """ - def __bool__(self): - return False - if six.PY2: - __nonzero__ = __bool__ - - def __call__(self, fullpath): - return iter(()) - - -def safe_listdir(path): - """ - Attempt to list contents of path, but suppress some exceptions. 
- """ - try: - return os.listdir(path) - except (PermissionError, NotADirectoryError): - pass - except OSError as e: - # Ignore the directory if does not exist, not a directory or - # permission denied - ignorable = ( - e.errno in (errno.ENOTDIR, errno.EACCES, errno.ENOENT) - # Python 2 on Windows needs to be handled this way :( - or getattr(e, "winerror", None) == 267 - ) - if not ignorable: - raise - return () - - -def distributions_from_metadata(path): - root = os.path.dirname(path) - if os.path.isdir(path): - if len(os.listdir(path)) == 0: - # empty metadata dir; skip - return - metadata = PathMetadata(root, path) - else: - metadata = FileMetadata(path) - entry = os.path.basename(path) - yield Distribution.from_location( - root, entry, metadata, precedence=DEVELOP_DIST, - ) - - -def non_empty_lines(path): - """ - Yield non-empty lines from file at path - """ - with open(path) as f: - for line in f: - line = line.strip() - if line: - yield line - - -def resolve_egg_link(path): - """ - Given a path to an .egg-link, resolve distributions - present in the referenced path. - """ - referenced_paths = non_empty_lines(path) - resolved_paths = ( - os.path.join(os.path.dirname(path), ref) - for ref in referenced_paths - ) - dist_groups = map(find_distributions, resolved_paths) - return next(dist_groups, ()) - - -register_finder(pkgutil.ImpImporter, find_on_path) - -if hasattr(importlib_machinery, 'FileFinder'): - register_finder(importlib_machinery.FileFinder, find_on_path) - -_declare_state('dict', _namespace_handlers={}) -_declare_state('dict', _namespace_packages={}) - - -def register_namespace_handler(importer_type, namespace_handler): - """Register `namespace_handler` to declare namespace packages - - `importer_type` is the type or class of a PEP 302 "Importer" (sys.path item - handler), and `namespace_handler` is a callable like this:: - - def namespace_handler(importer, path_entry, moduleName, module): - # return a path_entry to use for child packages - - Namespace handlers are only called if the importer object has already - agreed that it can handle the relevant path item, and they should only - return a subpath if the module __path__ does not already contain an - equivalent subpath. For an example namespace handler, see - ``pkg_resources.file_ns_handler``. - """ - _namespace_handlers[importer_type] = namespace_handler - - -def _handle_ns(packageName, path_item): - """Ensure that named package includes a subpath of path_item (if needed)""" - - importer = get_importer(path_item) - if importer is None: - return None - loader = importer.find_module(packageName) - if loader is None: - return None - module = sys.modules.get(packageName) - if module is None: - module = sys.modules[packageName] = types.ModuleType(packageName) - module.__path__ = [] - _set_parent_ns(packageName) - elif not hasattr(module, '__path__'): - raise TypeError("Not a package:", packageName) - handler = _find_adapter(_namespace_handlers, importer) - subpath = handler(importer, path_item, packageName, module) - if subpath is not None: - path = module.__path__ - path.append(subpath) - loader.load_module(packageName) - _rebuild_mod_path(path, packageName, module) - return subpath - - -def _rebuild_mod_path(orig_path, package_name, module): - """ - Rebuild module.__path__ ensuring that all entries are ordered - corresponding to their sys.path order - """ - sys_path = [_normalize_cached(p) for p in sys.path] - - def safe_sys_path_index(entry): - """ - Workaround for #520 and #513. 
- """ - try: - return sys_path.index(entry) - except ValueError: - return float('inf') - - def position_in_sys_path(path): - """ - Return the ordinal of the path based on its position in sys.path - """ - path_parts = path.split(os.sep) - module_parts = package_name.count('.') + 1 - parts = path_parts[:-module_parts] - return safe_sys_path_index(_normalize_cached(os.sep.join(parts))) - - if not isinstance(orig_path, list): - # Is this behavior useful when module.__path__ is not a list? - return - - orig_path.sort(key=position_in_sys_path) - module.__path__[:] = [_normalize_cached(p) for p in orig_path] - - -def declare_namespace(packageName): - """Declare that package 'packageName' is a namespace package""" - - _imp.acquire_lock() - try: - if packageName in _namespace_packages: - return - - path, parent = sys.path, None - if '.' in packageName: - parent = '.'.join(packageName.split('.')[:-1]) - declare_namespace(parent) - if parent not in _namespace_packages: - __import__(parent) - try: - path = sys.modules[parent].__path__ - except AttributeError: - raise TypeError("Not a package:", parent) - - # Track what packages are namespaces, so when new path items are added, - # they can be updated - _namespace_packages.setdefault(parent, []).append(packageName) - _namespace_packages.setdefault(packageName, []) - - for path_item in path: - # Ensure all the parent's path items are reflected in the child, - # if they apply - _handle_ns(packageName, path_item) - - finally: - _imp.release_lock() - - -def fixup_namespace_packages(path_item, parent=None): - """Ensure that previously-declared namespace packages include path_item""" - _imp.acquire_lock() - try: - for package in _namespace_packages.get(parent, ()): - subpath = _handle_ns(package, path_item) - if subpath: - fixup_namespace_packages(subpath, package) - finally: - _imp.release_lock() - - -def file_ns_handler(importer, path_item, packageName, module): - """Compute an ns-package subpath for a filesystem or zipfile importer""" - - subpath = os.path.join(path_item, packageName.split('.')[-1]) - normalized = _normalize_cached(subpath) - for item in module.__path__: - if _normalize_cached(item) == normalized: - break - else: - # Only return the path if it's not already there - return subpath - - -register_namespace_handler(pkgutil.ImpImporter, file_ns_handler) -register_namespace_handler(zipimport.zipimporter, file_ns_handler) - -if hasattr(importlib_machinery, 'FileFinder'): - register_namespace_handler(importlib_machinery.FileFinder, file_ns_handler) - - -def null_ns_handler(importer, path_item, packageName, module): - return None - - -register_namespace_handler(object, null_ns_handler) - - -def normalize_path(filename): - """Normalize a file/dir name for comparison purposes""" - return os.path.normcase(os.path.realpath(filename)) - - -def _normalize_cached(filename, _cache={}): - try: - return _cache[filename] - except KeyError: - _cache[filename] = result = normalize_path(filename) - return result - - -def _is_egg_path(path): - """ - Determine if given path appears to be an egg. - """ - return path.lower().endswith('.egg') - - -def _is_unpacked_egg(path): - """ - Determine if given path appears to be an unpacked egg. 
- """ - return ( - _is_egg_path(path) and - os.path.isfile(os.path.join(path, 'EGG-INFO', 'PKG-INFO')) - ) - - -def _set_parent_ns(packageName): - parts = packageName.split('.') - name = parts.pop() - if parts: - parent = '.'.join(parts) - setattr(sys.modules[parent], name, sys.modules[packageName]) - - -def yield_lines(strs): - """Yield non-empty/non-comment lines of a string or sequence""" - if isinstance(strs, six.string_types): - for s in strs.splitlines(): - s = s.strip() - # skip blank lines/comments - if s and not s.startswith('#'): - yield s - else: - for ss in strs: - for s in yield_lines(ss): - yield s - - -MODULE = re.compile(r"\w+(\.\w+)*$").match -EGG_NAME = re.compile( - r""" - (?P<name>[^-]+) ( - -(?P<ver>[^-]+) ( - -py(?P<pyver>[^-]+) ( - -(?P<plat>.+) - )? - )? - )? - """, - re.VERBOSE | re.IGNORECASE, -).match - - -class EntryPoint(object): - """Object representing an advertised importable object""" - - def __init__(self, name, module_name, attrs=(), extras=(), dist=None): - if not MODULE(module_name): - raise ValueError("Invalid module name", module_name) - self.name = name - self.module_name = module_name - self.attrs = tuple(attrs) - self.extras = tuple(extras) - self.dist = dist - - def __str__(self): - s = "%s = %s" % (self.name, self.module_name) - if self.attrs: - s += ':' + '.'.join(self.attrs) - if self.extras: - s += ' [%s]' % ','.join(self.extras) - return s - - def __repr__(self): - return "EntryPoint.parse(%r)" % str(self) - - def load(self, require=True, *args, **kwargs): - """ - Require packages for this EntryPoint, then resolve it. - """ - if not require or args or kwargs: - warnings.warn( - "Parameters to load are deprecated. Call .resolve and " - ".require separately.", - DeprecationWarning, - stacklevel=2, - ) - if require: - self.require(*args, **kwargs) - return self.resolve() - - def resolve(self): - """ - Resolve the entry point from its module and attrs. - """ - module = __import__(self.module_name, fromlist=['__name__'], level=0) - try: - return functools.reduce(getattr, self.attrs, module) - except AttributeError as exc: - raise ImportError(str(exc)) - - def require(self, env=None, installer=None): - if self.extras and not self.dist: - raise UnknownExtra("Can't require() without a distribution", self) - - # Get the requirements for this entry point with all its extras and - # then resolve them. We have to pass `extras` along when resolving so - # that the working set knows what extras we want. Otherwise, for - # dist-info distributions, the working set will assume that the - # requirements for that extra are purely optional and skip over them. 
- reqs = self.dist.requires(self.extras) - items = working_set.resolve(reqs, env, installer, extras=self.extras) - list(map(working_set.add, items)) - - pattern = re.compile( - r'\s*' - r'(?P<name>.+?)\s*' - r'=\s*' - r'(?P<module>[\w.]+)\s*' - r'(:\s*(?P<attr>[\w.]+))?\s*' - r'(?P<extras>\[.*\])?\s*$' - ) - - @classmethod - def parse(cls, src, dist=None): - """Parse a single entry point from string `src` - - Entry point syntax follows the form:: - - name = some.module:some.attr [extra1, extra2] - - The entry name and module name are required, but the ``:attrs`` and - ``[extras]`` parts are optional - """ - m = cls.pattern.match(src) - if not m: - msg = "EntryPoint must be in 'name=module:attrs [extras]' format" - raise ValueError(msg, src) - res = m.groupdict() - extras = cls._parse_extras(res['extras']) - attrs = res['attr'].split('.') if res['attr'] else () - return cls(res['name'], res['module'], attrs, extras, dist) - - @classmethod - def _parse_extras(cls, extras_spec): - if not extras_spec: - return () - req = Requirement.parse('x' + extras_spec) - if req.specs: - raise ValueError() - return req.extras - - @classmethod - def parse_group(cls, group, lines, dist=None): - """Parse an entry point group""" - if not MODULE(group): - raise ValueError("Invalid group name", group) - this = {} - for line in yield_lines(lines): - ep = cls.parse(line, dist) - if ep.name in this: - raise ValueError("Duplicate entry point", group, ep.name) - this[ep.name] = ep - return this - - @classmethod - def parse_map(cls, data, dist=None): - """Parse a map of entry point groups""" - if isinstance(data, dict): - data = data.items() - else: - data = split_sections(data) - maps = {} - for group, lines in data: - if group is None: - if not lines: - continue - raise ValueError("Entry points must be listed in groups") - group = group.strip() - if group in maps: - raise ValueError("Duplicate group name", group) - maps[group] = cls.parse_group(group, lines, dist) - return maps - - -def _remove_md5_fragment(location): - if not location: - return '' - parsed = urllib.parse.urlparse(location) - if parsed[-1].startswith('md5='): - return urllib.parse.urlunparse(parsed[:-1] + ('',)) - return location - - -def _version_from_file(lines): - """ - Given an iterable of lines from a Metadata file, return - the value of the Version field, if present, or None otherwise. 
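Tying the entry-point grammar from a little earlier to concrete values
(module, attribute, and extra names are all invented)::

    from pkg_resources import EntryPoint

    ep = EntryPoint.parse('hello = example.cli:main [extra1]')
    print(ep.name, ep.module_name, ep.attrs, ep.extras)
    # -> hello example.cli ('main',) ('extra1',)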
- """ - def is_version_line(line): - return line.lower().startswith('version:') - version_lines = filter(is_version_line, lines) - line = next(iter(version_lines), '') - _, _, value = line.partition(':') - return safe_version(value.strip()) or None - - -class Distribution(object): - """Wrap an actual or potential sys.path entry w/metadata""" - PKG_INFO = 'PKG-INFO' - - def __init__( - self, location=None, metadata=None, project_name=None, - version=None, py_version=PY_MAJOR, platform=None, - precedence=EGG_DIST): - self.project_name = safe_name(project_name or 'Unknown') - if version is not None: - self._version = safe_version(version) - self.py_version = py_version - self.platform = platform - self.location = location - self.precedence = precedence - self._provider = metadata or empty_provider - - @classmethod - def from_location(cls, location, basename, metadata=None, **kw): - project_name, version, py_version, platform = [None] * 4 - basename, ext = os.path.splitext(basename) - if ext.lower() in _distributionImpl: - cls = _distributionImpl[ext.lower()] - - match = EGG_NAME(basename) - if match: - project_name, version, py_version, platform = match.group( - 'name', 'ver', 'pyver', 'plat' - ) - return cls( - location, metadata, project_name=project_name, version=version, - py_version=py_version, platform=platform, **kw - )._reload_version() - - def _reload_version(self): - return self - - @property - def hashcmp(self): - return ( - self.parsed_version, - self.precedence, - self.key, - _remove_md5_fragment(self.location), - self.py_version or '', - self.platform or '', - ) - - def __hash__(self): - return hash(self.hashcmp) - - def __lt__(self, other): - return self.hashcmp < other.hashcmp - - def __le__(self, other): - return self.hashcmp <= other.hashcmp - - def __gt__(self, other): - return self.hashcmp > other.hashcmp - - def __ge__(self, other): - return self.hashcmp >= other.hashcmp - - def __eq__(self, other): - if not isinstance(other, self.__class__): - # It's not a Distribution, so they are not equal - return False - return self.hashcmp == other.hashcmp - - def __ne__(self, other): - return not self == other - - # These properties have to be lazy so that we don't have to load any - # metadata until/unless it's actually needed. (i.e., some distributions - # may not know their name or version without loading PKG-INFO) - - @property - def key(self): - try: - return self._key - except AttributeError: - self._key = key = self.project_name.lower() - return key - - @property - def parsed_version(self): - if not hasattr(self, "_parsed_version"): - self._parsed_version = parse_version(self.version) - - return self._parsed_version - - def _warn_legacy_version(self): - LV = packaging.version.LegacyVersion - is_legacy = isinstance(self._parsed_version, LV) - if not is_legacy: - return - - # While an empty version is technically a legacy version and - # is not a valid PEP 440 version, it's also unlikely to - # actually come from someone and instead it is more likely that - # it comes from setuptools attempting to parse a filename and - # including it in the list. So for that we'll gate this warning - # on if the version is anything at all or not. - if not self.version: - return - - tmpl = textwrap.dedent(""" - '{project_name} ({version})' is being parsed as a legacy, - non PEP 440, - version. You may find odd behavior and sort order. - In particular it will be sorted as less than 0.0. It - is recommended to migrate to PEP 440 compatible - versions. 
- """).strip().replace('\n', ' ') - - warnings.warn(tmpl.format(**vars(self)), PEP440Warning) - - @property - def version(self): - try: - return self._version - except AttributeError: - version = _version_from_file(self._get_metadata(self.PKG_INFO)) - if version is None: - tmpl = "Missing 'Version:' header and/or %s file" - raise ValueError(tmpl % self.PKG_INFO, self) - return version - - @property - def _dep_map(self): - """ - A map of extra to its list of (direct) requirements - for this distribution, including the null extra. - """ - try: - return self.__dep_map - except AttributeError: - self.__dep_map = self._filter_extras(self._build_dep_map()) - return self.__dep_map - - @staticmethod - def _filter_extras(dm): - """ - Given a mapping of extras to dependencies, strip off - environment markers and filter out any dependencies - not matching the markers. - """ - for extra in list(filter(None, dm)): - new_extra = extra - reqs = dm.pop(extra) - new_extra, _, marker = extra.partition(':') - fails_marker = marker and ( - invalid_marker(marker) - or not evaluate_marker(marker) - ) - if fails_marker: - reqs = [] - new_extra = safe_extra(new_extra) or None - - dm.setdefault(new_extra, []).extend(reqs) - return dm - - def _build_dep_map(self): - dm = {} - for name in 'requires.txt', 'depends.txt': - for extra, reqs in split_sections(self._get_metadata(name)): - dm.setdefault(extra, []).extend(parse_requirements(reqs)) - return dm - - def requires(self, extras=()): - """List of Requirements needed for this distro if `extras` are used""" - dm = self._dep_map - deps = [] - deps.extend(dm.get(None, ())) - for ext in extras: - try: - deps.extend(dm[safe_extra(ext)]) - except KeyError: - raise UnknownExtra( - "%s has no such extra feature %r" % (self, ext) - ) - return deps - - def _get_metadata(self, name): - if self.has_metadata(name): - for line in self.get_metadata_lines(name): - yield line - - def activate(self, path=None, replace=False): - """Ensure distribution is importable on `path` (default=sys.path)""" - if path is None: - path = sys.path - self.insert_on(path, replace=replace) - if path is sys.path: - fixup_namespace_packages(self.location) - for pkg in self._get_metadata('namespace_packages.txt'): - if pkg in sys.modules: - declare_namespace(pkg) - - def egg_name(self): - """Return what this distribution's standard .egg filename should be""" - filename = "%s-%s-py%s" % ( - to_filename(self.project_name), to_filename(self.version), - self.py_version or PY_MAJOR - ) - - if self.platform: - filename += '-' + self.platform - return filename - - def __repr__(self): - if self.location: - return "%s (%s)" % (self, self.location) - else: - return str(self) - - def __str__(self): - try: - version = getattr(self, 'version', None) - except ValueError: - version = None - version = version or "[unknown version]" - return "%s %s" % (self.project_name, version) - - def __getattr__(self, attr): - """Delegate all unrecognized public attributes to .metadata provider""" - if attr.startswith('_'): - raise AttributeError(attr) - return getattr(self._provider, attr) - - @classmethod - def from_filename(cls, filename, metadata=None, **kw): - return cls.from_location( - _normalize_cached(filename), os.path.basename(filename), metadata, - **kw - ) - - def as_requirement(self): - """Return a ``Requirement`` that matches this distribution exactly""" - if isinstance(self.parsed_version, packaging.version.Version): - spec = "%s==%s" % (self.project_name, self.parsed_version) - else: - spec = "%s===%s" % 
(self.project_name, self.parsed_version) - - return Requirement.parse(spec) - - def load_entry_point(self, group, name): - """Return the `name` entry point of `group` or raise ImportError""" - ep = self.get_entry_info(group, name) - if ep is None: - raise ImportError("Entry point %r not found" % ((group, name),)) - return ep.load() - - def get_entry_map(self, group=None): - """Return the entry point map for `group`, or the full entry map""" - try: - ep_map = self._ep_map - except AttributeError: - ep_map = self._ep_map = EntryPoint.parse_map( - self._get_metadata('entry_points.txt'), self - ) - if group is not None: - return ep_map.get(group, {}) - return ep_map - - def get_entry_info(self, group, name): - """Return the EntryPoint object for `group`+`name`, or ``None``""" - return self.get_entry_map(group).get(name) - - def insert_on(self, path, loc=None, replace=False): - """Ensure self.location is on path - - If replace=False (default): - - If location is already in path anywhere, do nothing. - - Else: - - If it's an egg and its parent directory is on path, - insert just ahead of the parent. - - Else: add to the end of path. - If replace=True: - - If location is already on path anywhere (not eggs) - or higher priority than its parent (eggs) - do nothing. - - Else: - - If it's an egg and its parent directory is on path, - insert just ahead of the parent, - removing any lower-priority entries. - - Else: add it to the front of path. - """ - - loc = loc or self.location - if not loc: - return - - nloc = _normalize_cached(loc) - bdir = os.path.dirname(nloc) - npath = [(p and _normalize_cached(p) or p) for p in path] - - for p, item in enumerate(npath): - if item == nloc: - if replace: - break - else: - # don't modify path (even removing duplicates) if - # found and not replace - return - elif item == bdir and self.precedence == EGG_DIST: - # if it's an .egg, give it precedence over its directory - # UNLESS it's already been added to sys.path and replace=False - if (not replace) and nloc in npath[p:]: - return - if path is sys.path: - self.check_version_conflict() - path.insert(p, loc) - npath.insert(p, nloc) - break - else: - if path is sys.path: - self.check_version_conflict() - if replace: - path.insert(0, loc) - else: - path.append(loc) - return - - # p is the spot where we found or inserted loc; now remove duplicates - while True: - try: - np = npath.index(nloc, p + 1) - except ValueError: - break - else: - del npath[np], path[np] - # ha! 
- p = np - - return - - def check_version_conflict(self): - if self.key == 'setuptools': - # ignore the inevitable setuptools self-conflicts :( - return - - nsp = dict.fromkeys(self._get_metadata('namespace_packages.txt')) - loc = normalize_path(self.location) - for modname in self._get_metadata('top_level.txt'): - if (modname not in sys.modules or modname in nsp - or modname in _namespace_packages): - continue - if modname in ('pkg_resources', 'setuptools', 'site'): - continue - fn = getattr(sys.modules[modname], '__file__', None) - if fn and (normalize_path(fn).startswith(loc) or - fn.startswith(self.location)): - continue - issue_warning( - "Module %s was already imported from %s, but %s is being added" - " to sys.path" % (modname, fn, self.location), - ) - - def has_version(self): - try: - self.version - except ValueError: - issue_warning("Unbuilt egg for " + repr(self)) - return False - return True - - def clone(self, **kw): - """Copy this distribution, substituting in any changed keyword args""" - names = 'project_name version py_version platform location precedence' - for attr in names.split(): - kw.setdefault(attr, getattr(self, attr, None)) - kw.setdefault('metadata', self._provider) - return self.__class__(**kw) - - @property - def extras(self): - return [dep for dep in self._dep_map if dep] - - -class EggInfoDistribution(Distribution): - def _reload_version(self): - """ - Packages installed by distutils (e.g. numpy or scipy), - which uses an old safe_version, and so - their version numbers can get mangled when - converted to filenames (e.g., 1.11.0.dev0+2329eae to - 1.11.0.dev0_2329eae). These distributions will not be - parsed properly - downstream by Distribution and safe_version, so - take an extra step and try to get the version number from - the metadata file itself instead of the filename. - """ - md_version = _version_from_file(self._get_metadata(self.PKG_INFO)) - if md_version: - self._version = md_version - return self - - -class DistInfoDistribution(Distribution): - """ - Wrap an actual or potential sys.path entry - w/metadata, .dist-info style. 
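A sketch of how the per-extra dependency map surfaces through the
public API (``dist`` and the extra name here are hypothetical)::

    # given any resolved Distribution `dist`:
    print(dist.extras)  # declared extras, e.g. ['tests']
    for req in dist.requires(extras=('tests',)):
        # raises UnknownExtra if the extra was never declared
        print(req.project_name, req.specs)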
- """ - PKG_INFO = 'METADATA' - EQEQ = re.compile(r"([\(,])\s*(\d.*?)\s*([,\)])") - - @property - def _parsed_pkg_info(self): - """Parse and cache metadata""" - try: - return self._pkg_info - except AttributeError: - metadata = self.get_metadata(self.PKG_INFO) - self._pkg_info = email.parser.Parser().parsestr(metadata) - return self._pkg_info - - @property - def _dep_map(self): - try: - return self.__dep_map - except AttributeError: - self.__dep_map = self._compute_dependencies() - return self.__dep_map - - def _compute_dependencies(self): - """Recompute this distribution's dependencies.""" - dm = self.__dep_map = {None: []} - - reqs = [] - # Including any condition expressions - for req in self._parsed_pkg_info.get_all('Requires-Dist') or []: - reqs.extend(parse_requirements(req)) - - def reqs_for_extra(extra): - for req in reqs: - if not req.marker or req.marker.evaluate({'extra': extra}): - yield req - - common = frozenset(reqs_for_extra(None)) - dm[None].extend(common) - - for extra in self._parsed_pkg_info.get_all('Provides-Extra') or []: - s_extra = safe_extra(extra.strip()) - dm[s_extra] = list(frozenset(reqs_for_extra(extra)) - common) - - return dm - - -_distributionImpl = { - '.egg': Distribution, - '.egg-info': EggInfoDistribution, - '.dist-info': DistInfoDistribution, -} - - -def issue_warning(*args, **kw): - level = 1 - g = globals() - try: - # find the first stack frame that is *not* code in - # the pkg_resources module, to use for the warning - while sys._getframe(level).f_globals is g: - level += 1 - except ValueError: - pass - warnings.warn(stacklevel=level + 1, *args, **kw) - - -class RequirementParseError(ValueError): - def __str__(self): - return ' '.join(self.args) - - -def parse_requirements(strs): - """Yield ``Requirement`` objects for each specification in `strs` - - `strs` must be a string, or a (possibly-nested) iterable thereof. - """ - # create a steppable iterator, so we can handle \-continuations - lines = iter(yield_lines(strs)) - - for line in lines: - # Drop comments -- a hash without a space may be in a URL. - if ' #' in line: - line = line[:line.find(' #')] - # If there is a line continuation, drop it, and append the next line. - if line.endswith('\\'): - line = line[:-2].strip() - try: - line += next(lines) - except StopIteration: - return - yield Requirement(line) - - -class Requirement(packaging.requirements.Requirement): - def __init__(self, requirement_string): - """DO NOT CALL THIS UNDOCUMENTED METHOD; use Requirement.parse()!""" - try: - super(Requirement, self).__init__(requirement_string) - except packaging.requirements.InvalidRequirement as e: - raise RequirementParseError(str(e)) - self.unsafe_name = self.name - project_name = safe_name(self.name) - self.project_name, self.key = project_name, project_name.lower() - self.specs = [ - (spec.operator, spec.version) for spec in self.specifier] - self.extras = tuple(map(safe_extra, self.extras)) - self.hashCmp = ( - self.key, - self.specifier, - frozenset(self.extras), - str(self.marker) if self.marker else None, - ) - self.__hash = hash(self.hashCmp) - - def __eq__(self, other): - return ( - isinstance(other, Requirement) and - self.hashCmp == other.hashCmp - ) - - def __ne__(self, other): - return not self == other - - def __contains__(self, item): - if isinstance(item, Distribution): - if item.key != self.key: - return False - - item = item.version - - # Allow prereleases always in order to match the previous behavior of - # this method. 
In the future this should be smarter and follow PEP 440 - # more accurately. - return self.specifier.contains(item, prereleases=True) - - def __hash__(self): - return self.__hash - - def __repr__(self): - return "Requirement.parse(%r)" % str(self) - - @staticmethod - def parse(s): - req, = parse_requirements(s) - return req - - -def _always_object(classes): - """ - Ensure object appears in the mro even - for old-style classes. - """ - if object not in classes: - return classes + (object,) - return classes - - -def _find_adapter(registry, ob): - """Return an adapter factory for `ob` from `registry`""" - types = _always_object(inspect.getmro(getattr(ob, '__class__', type(ob)))) - for t in types: - if t in registry: - return registry[t] - - -def ensure_directory(path): - """Ensure that the parent directory of `path` exists""" - dirname = os.path.dirname(path) - py31compat.makedirs(dirname, exist_ok=True) - - -def _bypass_ensure_directory(path): - """Sandbox-bypassing version of ensure_directory()""" - if not WRITE_SUPPORT: - raise IOError('"os.mkdir" not supported on this platform.') - dirname, filename = split(path) - if dirname and filename and not isdir(dirname): - _bypass_ensure_directory(dirname) - mkdir(dirname, 0o755) - - -def split_sections(s): - """Split a string or iterable thereof into (section, content) pairs - - Each ``section`` is a stripped version of the section header ("[section]") - and each ``content`` is a list of stripped lines excluding blank lines and - comment-only lines. If there are any such lines before the first section - header, they're returned in a first ``section`` of ``None``. - """ - section = None - content = [] - for line in yield_lines(s): - if line.startswith("["): - if line.endswith("]"): - if section or content: - yield section, content - section = line[1:-1].strip() - content = [] - else: - raise ValueError("Invalid section heading", line) - else: - content.append(line) - - # wrap up last segment - yield section, content - - -def _mkstemp(*args, **kw): - old_open = os.open - try: - # temporarily bypass sandboxing - os.open = os_open - return tempfile.mkstemp(*args, **kw) - finally: - # and then put it back - os.open = old_open - - -# Silence the PEP440Warning by default, so that end users don't get hit by it -# randomly just because they use pkg_resources. We want to append the rule -# because we want earlier uses of filterwarnings to take precedence over this -# one. -warnings.filterwarnings("ignore", category=PEP440Warning, append=True) - - -# from jaraco.functools 1.3 -def _call_aside(f, *args, **kwargs): - f(*args, **kwargs) - return f - - -@_call_aside -def _initialize(g=globals()): - "Set up global resource manager (deliberately not state-saved)" - manager = ResourceManager() - g['_manager'] = manager - g.update( - (name, getattr(manager, name)) - for name in dir(manager) - if not name.startswith('_') - ) - - -@_call_aside -def _initialize_master_working_set(): - """ - Prepare the master working set and make the ``require()`` - API available. - - This function has explicit effects on the global state - of pkg_resources. It is intended to be invoked once at - the initialization of this module. - - Invocation by other packages is unsupported and done - at their own risk. 
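For reference, the ``Requirement`` objects threaded through all of the
above behave like so (the project name is invented)::

    from pkg_resources import Requirement

    req = Requirement.parse('example>=1.0,<2.0')
    print(req.key)        # 'example'
    print('1.5' in req)   # True: bare versions are matched via the specifier
    print('2.1' in req)   # False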
- """ - working_set = WorkingSet._build_master() - _declare_state('object', working_set=working_set) - - require = working_set.require - iter_entry_points = working_set.iter_entry_points - add_activation_listener = working_set.subscribe - run_script = working_set.run_script - # backward compatibility - run_main = run_script - # Activate all distributions already on sys.path with replace=False and - # ensure that all distributions added to the working set in the future - # (e.g. by calling ``require()``) will get activated as well, - # with higher priority (replace=True). - tuple( - dist.activate(replace=False) - for dist in working_set - ) - add_activation_listener( - lambda dist: dist.activate(replace=True), - existing=False, - ) - working_set.entries = [] - # match order - list(map(working_set.add_entry, sys.path)) - globals().update(locals()) diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index 32a2e29..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/__pycache__/py31compat.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/__pycache__/py31compat.cpython-36.pyc deleted file mode 100644 index a49bff6..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/__pycache__/py31compat.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/__init__.py b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index 75241a2..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/__pycache__/appdirs.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/__pycache__/appdirs.cpython-36.pyc deleted file mode 100644 index f4786ef..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/__pycache__/appdirs.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/__pycache__/pyparsing.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/__pycache__/pyparsing.cpython-36.pyc deleted file mode 100644 index a7df642..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/__pycache__/pyparsing.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/__pycache__/six.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/__pycache__/six.cpython-36.pyc deleted file mode 100644 index 9366967..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/__pycache__/six.cpython-36.pyc and /dev/null differ diff --git 
a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/appdirs.py b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/appdirs.py deleted file mode 100644 index 32e7c9f..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/appdirs.py +++ /dev/null @@ -1,552 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# Copyright (c) 2005-2010 ActiveState Software Inc. -# Copyright (c) 2013 Eddy Petrișor - -"""Utilities for determining application-specific dirs. - -See <http://github.com/ActiveState/appdirs> for details and usage. -""" -# Dev Notes: -# - MSDN on where to store app data files: -# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120 -# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html -# - XDG spec for Un*x: http://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html - -__version_info__ = (1, 4, 0) -__version__ = '.'.join(map(str, __version_info__)) - - -import sys -import os - -PY3 = sys.version_info[0] == 3 - -if PY3: - unicode = str - -if sys.platform.startswith('java'): - import platform - os_name = platform.java_ver()[3][0] - if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc. - system = 'win32' - elif os_name.startswith('Mac'): # "Mac OS X", etc. - system = 'darwin' - else: # "Linux", "SunOS", "FreeBSD", etc. - # Setting this to "linux2" is not ideal, but only Windows or Mac - # are actually checked for and the rest of the module expects - # *sys.platform* style strings. - system = 'linux2' -else: - system = sys.platform - - - -def user_data_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> - for a discussion of issues. - - Typical user data directories are: - Mac OS X: ~/Library/Application Support/<AppName> - Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined - Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName> - Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName> - Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName> - Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName> - - For Unix, we follow the XDG spec and support $XDG_DATA_HOME. - That means, by default "~/.local/share/<AppName>". 
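A sketch, reusing the names from this module's own self-test at the
bottom of the file (imported here from its vendored location)::

    from pkg_resources._vendor.appdirs import user_data_dir

    print(user_data_dir('MyApp', 'MyCompany'))
    # Linux:   ~/.local/share/MyApp (expanded; or under $XDG_DATA_HOME)
    # macOS:   ~/Library/Application Support/MyApp
    # Windows: %LOCALAPPDATA%\MyCompany\MyApp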
- """ - if system == "win32": - if appauthor is None: - appauthor = appname - const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA" - path = os.path.normpath(_get_win_folder(const)) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('~/Library/Application Support/') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_data_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of data dirs should be - returned. By default, the first item from XDG_DATA_DIRS is - returned, or '/usr/local/share/<AppName>', - if XDG_DATA_DIRS is not set - - Typical user data directories are: - Mac OS X: /Library/Application Support/<AppName> - Unix: /usr/local/share/<AppName> or /usr/share/<AppName> - Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName> - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7. - - For Unix, this is using the $XDG_DATA_DIRS[0] default. - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - elif system == 'darwin': - path = os.path.expanduser('/Library/Application Support') - if appname: - path = os.path.join(path, appname) - else: - # XDG default for $XDG_DATA_DIRS - # only first, if multipath is False - path = os.getenv('XDG_DATA_DIRS', - os.pathsep.join(['/usr/local/share', '/usr/share'])) - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - if appname and version: - path = os.path.join(path, version) - return path - - -def user_config_dir(appname=None, appauthor=None, version=None, roaming=False): - r"""Return full path to the user-specific config dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. 
Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "roaming" (boolean, default False) can be set True to use the Windows - roaming appdata directory. That means that for users on a Windows - network setup for roaming profiles, this user data will be - sync'd on login. See - <http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx> - for a discussion of issues. - - Typical user data directories are: - Mac OS X: same as user_data_dir - Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined - Win *: same as user_data_dir - - For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME. - That means, by deafult "~/.config/<AppName>". - """ - if system in ["win32", "darwin"]: - path = user_data_dir(appname, appauthor, None, roaming) - else: - path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config")) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def site_config_dir(appname=None, appauthor=None, version=None, multipath=False): - r"""Return full path to the user-shared data dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "multipath" is an optional parameter only applicable to *nix - which indicates that the entire list of config dirs should be - returned. By default, the first item from XDG_CONFIG_DIRS is - returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set - - Typical user data directories are: - Mac OS X: same as site_data_dir - Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in - $XDG_CONFIG_DIRS - Win *: same as site_data_dir - Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.) - - For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False - - WARNING: Do not use this on Windows. See the Vista-Fail note above for why. - """ - if system in ["win32", "darwin"]: - path = site_data_dir(appname, appauthor) - if appname and version: - path = os.path.join(path, version) - else: - # XDG default for $XDG_CONFIG_DIRS - # only first, if multipath is False - path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg') - pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)] - if appname: - if version: - appname = os.path.join(appname, version) - pathlist = [os.sep.join([x, appname]) for x in pathlist] - - if multipath: - path = os.pathsep.join(pathlist) - else: - path = pathlist[0] - return path - - -def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific cache dir for this application. - - "appname" is the name of application. 
- If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Cache" to the base app data dir for Windows. See - discussion below. - - Typical user cache directories are: - Mac OS X: ~/Library/Caches/<AppName> - Unix: ~/.cache/<AppName> (XDG default) - Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache - Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache - - On Windows the only suggestion in the MSDN docs is that local settings go in - the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming - app data dir (the default returned by `user_data_dir` above). Apps typically - put cache data somewhere *under* the given dir here. Some examples: - ...\Mozilla\Firefox\Profiles\<ProfileName>\Cache - ...\Acme\SuperApp\Cache\1.0 - OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value. - This can be disabled with the `opinion=False` option. - """ - if system == "win32": - if appauthor is None: - appauthor = appname - path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA")) - if appname: - if appauthor is not False: - path = os.path.join(path, appauthor, appname) - else: - path = os.path.join(path, appname) - if opinion: - path = os.path.join(path, "Cache") - elif system == 'darwin': - path = os.path.expanduser('~/Library/Caches') - if appname: - path = os.path.join(path, appname) - else: - path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache')) - if appname: - path = os.path.join(path, appname) - if appname and version: - path = os.path.join(path, version) - return path - - -def user_log_dir(appname=None, appauthor=None, version=None, opinion=True): - r"""Return full path to the user-specific log dir for this application. - - "appname" is the name of application. - If None, just the system directory is returned. - "appauthor" (only used on Windows) is the name of the - appauthor or distributing body for this application. Typically - it is the owning company name. This falls back to appname. You may - pass False to disable it. - "version" is an optional version path element to append to the - path. You might want to use this if you want multiple versions - of your app to be able to run independently. If used, this - would typically be "<major>.<minor>". - Only applied when appname is present. - "opinion" (boolean) can be False to disable the appending of - "Logs" to the base app data dir for Windows, and "log" to the - base cache dir for Unix. See discussion below. - - Typical user cache directories are: - Mac OS X: ~/Library/Logs/<AppName> - Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined - Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs - Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs - - On Windows the only suggestion in the MSDN docs is that local settings - go in the `CSIDL_LOCAL_APPDATA` directory. 
(Note: I'm interested in - examples of what some windows apps use for a logs dir.) - - OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA` - value for Windows and appends "log" to the user cache dir for Unix. - This can be disabled with the `opinion=False` option. - """ - if system == "darwin": - path = os.path.join( - os.path.expanduser('~/Library/Logs'), - appname) - elif system == "win32": - path = user_data_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "Logs") - else: - path = user_cache_dir(appname, appauthor, version) - version = False - if opinion: - path = os.path.join(path, "log") - if appname and version: - path = os.path.join(path, version) - return path - - -class AppDirs(object): - """Convenience wrapper for getting application dirs.""" - def __init__(self, appname, appauthor=None, version=None, roaming=False, - multipath=False): - self.appname = appname - self.appauthor = appauthor - self.version = version - self.roaming = roaming - self.multipath = multipath - - @property - def user_data_dir(self): - return user_data_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_data_dir(self): - return site_data_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_config_dir(self): - return user_config_dir(self.appname, self.appauthor, - version=self.version, roaming=self.roaming) - - @property - def site_config_dir(self): - return site_config_dir(self.appname, self.appauthor, - version=self.version, multipath=self.multipath) - - @property - def user_cache_dir(self): - return user_cache_dir(self.appname, self.appauthor, - version=self.version) - - @property - def user_log_dir(self): - return user_log_dir(self.appname, self.appauthor, - version=self.version) - - -#---- internal support stuff - -def _get_win_folder_from_registry(csidl_name): - """This is a fallback technique at best. I'm not sure if using the - registry for this guarantees us the correct answer for all CSIDL_* - names. - """ - import _winreg - - shell_folder_name = { - "CSIDL_APPDATA": "AppData", - "CSIDL_COMMON_APPDATA": "Common AppData", - "CSIDL_LOCAL_APPDATA": "Local AppData", - }[csidl_name] - - key = _winreg.OpenKey( - _winreg.HKEY_CURRENT_USER, - r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders" - ) - dir, type = _winreg.QueryValueEx(key, shell_folder_name) - return dir - - -def _get_win_folder_with_pywin32(csidl_name): - from win32com.shell import shellcon, shell - dir = shell.SHGetFolderPath(0, getattr(shellcon, csidl_name), 0, 0) - # Try to make this a unicode path because SHGetFolderPath does - # not return unicode strings when there is unicode data in the - # path. - try: - dir = unicode(dir) - - # Downgrade to short path name if have highbit chars. See - # <http://bugs.activestate.com/show_bug.cgi?id=85099>. - has_high_char = False - for c in dir: - if ord(c) > 255: - has_high_char = True - break - if has_high_char: - try: - import win32api - dir = win32api.GetShortPathName(dir) - except ImportError: - pass - except UnicodeError: - pass - return dir - - -def _get_win_folder_with_ctypes(csidl_name): - import ctypes - - csidl_const = { - "CSIDL_APPDATA": 26, - "CSIDL_COMMON_APPDATA": 35, - "CSIDL_LOCAL_APPDATA": 28, - }[csidl_name] - - buf = ctypes.create_unicode_buffer(1024) - ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf) - - # Downgrade to short path name if have highbit chars. 
See
-    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
-    has_high_char = False
-    for c in buf:
-        if ord(c) > 255:
-            has_high_char = True
-            break
-    if has_high_char:
-        buf2 = ctypes.create_unicode_buffer(1024)
-        if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
-            buf = buf2
-
-    return buf.value
-
-def _get_win_folder_with_jna(csidl_name):
-    import array
-    from com.sun import jna
-    from com.sun.jna.platform import win32
-
-    buf_size = win32.WinDef.MAX_PATH * 2
-    buf = array.zeros('c', buf_size)
-    shell = win32.Shell32.INSTANCE
-    shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
-    dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-
-    # Downgrade to short path name if have highbit chars. See
-    # <http://bugs.activestate.com/show_bug.cgi?id=85099>.
-    has_high_char = False
-    for c in dir:
-        if ord(c) > 255:
-            has_high_char = True
-            break
-    if has_high_char:
-        buf = array.zeros('c', buf_size)
-        kernel = win32.Kernel32.INSTANCE
-        if kernel.GetShortPathName(dir, buf, buf_size):
-            dir = jna.Native.toString(buf.tostring()).rstrip("\0")
-
-    return dir
-
-if system == "win32":
-    try:
-        import win32com.shell
-        _get_win_folder = _get_win_folder_with_pywin32
-    except ImportError:
-        try:
-            from ctypes import windll
-            _get_win_folder = _get_win_folder_with_ctypes
-        except ImportError:
-            try:
-                import com.sun.jna
-                _get_win_folder = _get_win_folder_with_jna
-            except ImportError:
-                _get_win_folder = _get_win_folder_from_registry
-
-
-#---- self test code
-
-if __name__ == "__main__":
-    appname = "MyApp"
-    appauthor = "MyCompany"
-
-    props = ("user_data_dir", "site_data_dir",
-             "user_config_dir", "site_config_dir",
-             "user_cache_dir", "user_log_dir")
-
-    print("-- app dirs (with optional 'version')")
-    dirs = AppDirs(appname, appauthor, version="1.0")
-    for prop in props:
-        print("%s: %s" % (prop, getattr(dirs, prop)))
-
-    print("\n-- app dirs (without optional 'version')")
-    dirs = AppDirs(appname, appauthor)
-    for prop in props:
-        print("%s: %s" % (prop, getattr(dirs, prop)))
-
-    print("\n-- app dirs (without optional 'appauthor')")
-    dirs = AppDirs(appname)
-    for prop in props:
-        print("%s: %s" % (prop, getattr(dirs, prop)))
-
-    print("\n-- app dirs (with disabled 'appauthor')")
-    dirs = AppDirs(appname, appauthor=False)
-    for prop in props:
-        print("%s: %s" % (prop, getattr(dirs, prop)))
diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__about__.py b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__about__.py
deleted file mode 100644
index 95d330e..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__about__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
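For reference, a minimal usage sketch of the appdirs module deleted above. This is a sketch only: it assumes the standalone appdirs distribution (not this vendored copy's import path), reuses the placeholder names "MyApp"/"MyCompany" from the self-test code, and the commented paths are typical Linux results, not guaranteed output:

    # Sketch: querying per-user directories with appdirs (placeholder app names).
    from appdirs import AppDirs

    dirs = AppDirs("MyApp", "MyCompany", version="1.0")
    print(dirs.user_config_dir)  # e.g. /home/<user>/.config/MyApp/1.0
    print(dirs.user_cache_dir)   # e.g. /home/<user>/.cache/MyApp/1.0
    print(dirs.user_log_dir)     # e.g. /home/<user>/.cache/MyApp/1.0/log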
-from __future__ import absolute_import, division, print_function - -__all__ = [ - "__title__", "__summary__", "__uri__", "__version__", "__author__", - "__email__", "__license__", "__copyright__", -] - -__title__ = "packaging" -__summary__ = "Core utilities for Python packages" -__uri__ = "https://github.com/pypa/packaging" - -__version__ = "16.8" - -__author__ = "Donald Stufft and individual contributors" -__email__ = "donald@stufft.io" - -__license__ = "BSD or Apache License, Version 2.0" -__copyright__ = "Copyright 2014-2016 %s" % __author__ diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__init__.py b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__init__.py deleted file mode 100644 index 5ee6220..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -from .__about__ import ( - __author__, __copyright__, __email__, __license__, __summary__, __title__, - __uri__, __version__ -) - -__all__ = [ - "__title__", "__summary__", "__uri__", "__version__", "__author__", - "__email__", "__license__", "__copyright__", -] diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-36.pyc deleted file mode 100644 index 0741ce1..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/__about__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index d0b5509..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-36.pyc deleted file mode 100644 index cf5fc84..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/_compat.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-36.pyc deleted file mode 100644 index 3c89064..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/_structures.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/markers.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/markers.cpython-36.pyc deleted file mode 100644 index ed4c9e4..0000000 Binary files 
a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/markers.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-36.pyc deleted file mode 100644 index 9526c3c..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/requirements.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-36.pyc deleted file mode 100644 index 0937a82..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/specifiers.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-36.pyc deleted file mode 100644 index 651a78a..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/utils.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-36.pyc deleted file mode 100644 index fd10a3b..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/__pycache__/version.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/_compat.py b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/_compat.py deleted file mode 100644 index 210bb80..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/_compat.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import sys - - -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 - -# flake8: noqa - -if PY3: - string_types = str, -else: - string_types = basestring, - - -def with_metaclass(meta, *bases): - """ - Create a base class with a metaclass. - """ - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. 
- class metaclass(meta): - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/_structures.py b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/_structures.py deleted file mode 100644 index ccc2786..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/_structures.py +++ /dev/null @@ -1,68 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - - -class Infinity(object): - - def __repr__(self): - return "Infinity" - - def __hash__(self): - return hash(repr(self)) - - def __lt__(self, other): - return False - - def __le__(self, other): - return False - - def __eq__(self, other): - return isinstance(other, self.__class__) - - def __ne__(self, other): - return not isinstance(other, self.__class__) - - def __gt__(self, other): - return True - - def __ge__(self, other): - return True - - def __neg__(self): - return NegativeInfinity - -Infinity = Infinity() - - -class NegativeInfinity(object): - - def __repr__(self): - return "-Infinity" - - def __hash__(self): - return hash(repr(self)) - - def __lt__(self, other): - return True - - def __le__(self, other): - return True - - def __eq__(self, other): - return isinstance(other, self.__class__) - - def __ne__(self, other): - return not isinstance(other, self.__class__) - - def __gt__(self, other): - return False - - def __ge__(self, other): - return False - - def __neg__(self): - return Infinity - -NegativeInfinity = NegativeInfinity() diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/markers.py b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/markers.py deleted file mode 100644 index 892e578..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/markers.py +++ /dev/null @@ -1,301 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import operator -import os -import platform -import sys - -from pkg_resources.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd -from pkg_resources.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString -from pkg_resources.extern.pyparsing import Literal as L # noqa - -from ._compat import string_types -from .specifiers import Specifier, InvalidSpecifier - - -__all__ = [ - "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName", - "Marker", "default_environment", -] - - -class InvalidMarker(ValueError): - """ - An invalid marker was found, users should refer to PEP 508. - """ - - -class UndefinedComparison(ValueError): - """ - An invalid operation was attempted on a value that doesn't support it. - """ - - -class UndefinedEnvironmentName(ValueError): - """ - A name was attempted to be used that does not exist inside of the - environment. 
- """ - - -class Node(object): - - def __init__(self, value): - self.value = value - - def __str__(self): - return str(self.value) - - def __repr__(self): - return "<{0}({1!r})>".format(self.__class__.__name__, str(self)) - - def serialize(self): - raise NotImplementedError - - -class Variable(Node): - - def serialize(self): - return str(self) - - -class Value(Node): - - def serialize(self): - return '"{0}"'.format(self) - - -class Op(Node): - - def serialize(self): - return str(self) - - -VARIABLE = ( - L("implementation_version") | - L("platform_python_implementation") | - L("implementation_name") | - L("python_full_version") | - L("platform_release") | - L("platform_version") | - L("platform_machine") | - L("platform_system") | - L("python_version") | - L("sys_platform") | - L("os_name") | - L("os.name") | # PEP-345 - L("sys.platform") | # PEP-345 - L("platform.version") | # PEP-345 - L("platform.machine") | # PEP-345 - L("platform.python_implementation") | # PEP-345 - L("python_implementation") | # undocumented setuptools legacy - L("extra") -) -ALIASES = { - 'os.name': 'os_name', - 'sys.platform': 'sys_platform', - 'platform.version': 'platform_version', - 'platform.machine': 'platform_machine', - 'platform.python_implementation': 'platform_python_implementation', - 'python_implementation': 'platform_python_implementation' -} -VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) - -VERSION_CMP = ( - L("===") | - L("==") | - L(">=") | - L("<=") | - L("!=") | - L("~=") | - L(">") | - L("<") -) - -MARKER_OP = VERSION_CMP | L("not in") | L("in") -MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) - -MARKER_VALUE = QuotedString("'") | QuotedString('"') -MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) - -BOOLOP = L("and") | L("or") - -MARKER_VAR = VARIABLE | MARKER_VALUE - -MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) -MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) - -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() - -MARKER_EXPR = Forward() -MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) -MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) - -MARKER = stringStart + MARKER_EXPR + stringEnd - - -def _coerce_parse_result(results): - if isinstance(results, ParseResults): - return [_coerce_parse_result(i) for i in results] - else: - return results - - -def _format_marker(marker, first=True): - assert isinstance(marker, (list, tuple, string_types)) - - # Sometimes we have a structure like [[...]] which is a single item list - # where the single item is itself it's own list. In that case we want skip - # the rest of this function so that we don't get extraneous () on the - # outside. 
- if (isinstance(marker, list) and len(marker) == 1 and - isinstance(marker[0], (list, tuple))): - return _format_marker(marker[0]) - - if isinstance(marker, list): - inner = (_format_marker(m, first=False) for m in marker) - if first: - return " ".join(inner) - else: - return "(" + " ".join(inner) + ")" - elif isinstance(marker, tuple): - return " ".join([m.serialize() for m in marker]) - else: - return marker - - -_operators = { - "in": lambda lhs, rhs: lhs in rhs, - "not in": lambda lhs, rhs: lhs not in rhs, - "<": operator.lt, - "<=": operator.le, - "==": operator.eq, - "!=": operator.ne, - ">=": operator.ge, - ">": operator.gt, -} - - -def _eval_op(lhs, op, rhs): - try: - spec = Specifier("".join([op.serialize(), rhs])) - except InvalidSpecifier: - pass - else: - return spec.contains(lhs) - - oper = _operators.get(op.serialize()) - if oper is None: - raise UndefinedComparison( - "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs) - ) - - return oper(lhs, rhs) - - -_undefined = object() - - -def _get_env(environment, name): - value = environment.get(name, _undefined) - - if value is _undefined: - raise UndefinedEnvironmentName( - "{0!r} does not exist in evaluation environment.".format(name) - ) - - return value - - -def _evaluate_markers(markers, environment): - groups = [[]] - - for marker in markers: - assert isinstance(marker, (list, tuple, string_types)) - - if isinstance(marker, list): - groups[-1].append(_evaluate_markers(marker, environment)) - elif isinstance(marker, tuple): - lhs, op, rhs = marker - - if isinstance(lhs, Variable): - lhs_value = _get_env(environment, lhs.value) - rhs_value = rhs.value - else: - lhs_value = lhs.value - rhs_value = _get_env(environment, rhs.value) - - groups[-1].append(_eval_op(lhs_value, op, rhs_value)) - else: - assert marker in ["and", "or"] - if marker == "or": - groups.append([]) - - return any(all(item) for item in groups) - - -def format_full_version(info): - version = '{0.major}.{0.minor}.{0.micro}'.format(info) - kind = info.releaselevel - if kind != 'final': - version += kind[0] + str(info.serial) - return version - - -def default_environment(): - if hasattr(sys, 'implementation'): - iver = format_full_version(sys.implementation.version) - implementation_name = sys.implementation.name - else: - iver = '0' - implementation_name = '' - - return { - "implementation_name": implementation_name, - "implementation_version": iver, - "os_name": os.name, - "platform_machine": platform.machine(), - "platform_release": platform.release(), - "platform_system": platform.system(), - "platform_version": platform.version(), - "python_full_version": platform.python_version(), - "platform_python_implementation": platform.python_implementation(), - "python_version": platform.python_version()[:3], - "sys_platform": sys.platform, - } - - -class Marker(object): - - def __init__(self, marker): - try: - self._markers = _coerce_parse_result(MARKER.parseString(marker)) - except ParseException as e: - err_str = "Invalid marker: {0!r}, parse error at {1!r}".format( - marker, marker[e.loc:e.loc + 8]) - raise InvalidMarker(err_str) - - def __str__(self): - return _format_marker(self._markers) - - def __repr__(self): - return "<Marker({0!r})>".format(str(self)) - - def evaluate(self, environment=None): - """Evaluate a marker. - - Return the boolean from evaluating the given marker against the - environment. environment is an optional argument to override all or - part of the determined environment. 
- - The environment is determined from the current Python process. - """ - current_environment = default_environment() - if environment is not None: - current_environment.update(environment) - - return _evaluate_markers(self._markers, current_environment) diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/requirements.py b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/requirements.py deleted file mode 100644 index 0c8c4a3..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/requirements.py +++ /dev/null @@ -1,127 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import string -import re - -from pkg_resources.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException -from pkg_resources.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine -from pkg_resources.extern.pyparsing import Literal as L # noqa -from pkg_resources.extern.six.moves.urllib import parse as urlparse - -from .markers import MARKER_EXPR, Marker -from .specifiers import LegacySpecifier, Specifier, SpecifierSet - - -class InvalidRequirement(ValueError): - """ - An invalid requirement was found, users should refer to PEP 508. - """ - - -ALPHANUM = Word(string.ascii_letters + string.digits) - -LBRACKET = L("[").suppress() -RBRACKET = L("]").suppress() -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() -COMMA = L(",").suppress() -SEMICOLON = L(";").suppress() -AT = L("@").suppress() - -PUNCTUATION = Word("-_.") -IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) -IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) - -NAME = IDENTIFIER("name") -EXTRA = IDENTIFIER - -URI = Regex(r'[^ ]+')("url") -URL = (AT + URI) - -EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) -EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") - -VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) -VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) - -VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY -VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), - joinString=",", adjacent=False)("_raw_spec") -_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)) -_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '') - -VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") -VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) - -MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") -MARKER_EXPR.setParseAction( - lambda s, l, t: Marker(s[t._original_start:t._original_end]) -) -MARKER_SEPERATOR = SEMICOLON -MARKER = MARKER_SEPERATOR + MARKER_EXPR - -VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) -URL_AND_MARKER = URL + Optional(MARKER) - -NAMED_REQUIREMENT = \ - NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) - -REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd - - -class Requirement(object): - """Parse a requirement. - - Parse a given requirement string into its parts, such as name, specifier, - URL, and extras. Raises InvalidRequirement on a badly-formed requirement - string. - """ - - # TODO: Can we test whether something is contained within a requirement? - # If so how do we do that? 
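As a quick illustration of the markers.py module deleted in the hunk above, a sketch assuming the standalone packaging distribution rather than the pkg_resources vendored import path; the marker string is an arbitrary example:

    # Sketch: evaluating a PEP 508 environment marker.
    from packaging.markers import Marker

    marker = Marker('python_version >= "3.4" and os_name == "posix"')
    print(marker.evaluate())                   # against the current interpreter
    print(marker.evaluate({"os_name": "nt"}))  # override part of the environment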
Do we need to test against the _name_ of - # the thing as well as the version? What about the markers? - # TODO: Can we normalize the name and extra name? - - def __init__(self, requirement_string): - try: - req = REQUIREMENT.parseString(requirement_string) - except ParseException as e: - raise InvalidRequirement( - "Invalid requirement, parse error at \"{0!r}\"".format( - requirement_string[e.loc:e.loc + 8])) - - self.name = req.name - if req.url: - parsed_url = urlparse.urlparse(req.url) - if not (parsed_url.scheme and parsed_url.netloc) or ( - not parsed_url.scheme and not parsed_url.netloc): - raise InvalidRequirement("Invalid URL given") - self.url = req.url - else: - self.url = None - self.extras = set(req.extras.asList() if req.extras else []) - self.specifier = SpecifierSet(req.specifier) - self.marker = req.marker if req.marker else None - - def __str__(self): - parts = [self.name] - - if self.extras: - parts.append("[{0}]".format(",".join(sorted(self.extras)))) - - if self.specifier: - parts.append(str(self.specifier)) - - if self.url: - parts.append("@ {0}".format(self.url)) - - if self.marker: - parts.append("; {0}".format(self.marker)) - - return "".join(parts) - - def __repr__(self): - return "<Requirement({0!r})>".format(str(self)) diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/specifiers.py b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/specifiers.py deleted file mode 100644 index 7f5a76c..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/specifiers.py +++ /dev/null @@ -1,774 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import abc -import functools -import itertools -import re - -from ._compat import string_types, with_metaclass -from .version import Version, LegacyVersion, parse - - -class InvalidSpecifier(ValueError): - """ - An invalid specifier was found, users should refer to PEP 440. - """ - - -class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): - - @abc.abstractmethod - def __str__(self): - """ - Returns the str representation of this Specifier like object. This - should be representative of the Specifier itself. - """ - - @abc.abstractmethod - def __hash__(self): - """ - Returns a hash value for this Specifier like object. - """ - - @abc.abstractmethod - def __eq__(self, other): - """ - Returns a boolean representing whether or not the two Specifier like - objects are equal. - """ - - @abc.abstractmethod - def __ne__(self, other): - """ - Returns a boolean representing whether or not the two Specifier like - objects are not equal. - """ - - @abc.abstractproperty - def prereleases(self): - """ - Returns whether or not pre-releases as a whole are allowed by this - specifier. - """ - - @prereleases.setter - def prereleases(self, value): - """ - Sets whether or not pre-releases as a whole are allowed by this - specifier. - """ - - @abc.abstractmethod - def contains(self, item, prereleases=None): - """ - Determines if the given item is contained within this specifier. - """ - - @abc.abstractmethod - def filter(self, iterable, prereleases=None): - """ - Takes an iterable of items and filters them so that only items which - are contained within this specifier are allowed in it. 
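The requirements.py module deleted above parses PEP 508 requirement strings into these parts; a sketch (standalone packaging distribution assumed; the requirement string is the classic PEP 508 example):

    # Sketch: decomposing a PEP 508 requirement string.
    from packaging.requirements import Requirement

    req = Requirement('requests[security]>=2.8.1,==2.8.*; python_version < "2.7"')
    print(req.name)       # requests
    print(req.extras)     # {'security'}
    print(req.specifier)  # ==2.8.*,>=2.8.1
    print(req.marker)     # python_version < "2.7"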
- """ - - -class _IndividualSpecifier(BaseSpecifier): - - _operators = {} - - def __init__(self, spec="", prereleases=None): - match = self._regex.search(spec) - if not match: - raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) - - self._spec = ( - match.group("operator").strip(), - match.group("version").strip(), - ) - - # Store whether or not this Specifier should accept prereleases - self._prereleases = prereleases - - def __repr__(self): - pre = ( - ", prereleases={0!r}".format(self.prereleases) - if self._prereleases is not None - else "" - ) - - return "<{0}({1!r}{2})>".format( - self.__class__.__name__, - str(self), - pre, - ) - - def __str__(self): - return "{0}{1}".format(*self._spec) - - def __hash__(self): - return hash(self._spec) - - def __eq__(self, other): - if isinstance(other, string_types): - try: - other = self.__class__(other) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._spec == other._spec - - def __ne__(self, other): - if isinstance(other, string_types): - try: - other = self.__class__(other) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._spec != other._spec - - def _get_operator(self, op): - return getattr(self, "_compare_{0}".format(self._operators[op])) - - def _coerce_version(self, version): - if not isinstance(version, (LegacyVersion, Version)): - version = parse(version) - return version - - @property - def operator(self): - return self._spec[0] - - @property - def version(self): - return self._spec[1] - - @property - def prereleases(self): - return self._prereleases - - @prereleases.setter - def prereleases(self, value): - self._prereleases = value - - def __contains__(self, item): - return self.contains(item) - - def contains(self, item, prereleases=None): - # Determine if prereleases are to be allowed or not. - if prereleases is None: - prereleases = self.prereleases - - # Normalize item to a Version or LegacyVersion, this allows us to have - # a shortcut for ``"2.0" in Specifier(">=2") - item = self._coerce_version(item) - - # Determine if we should be supporting prereleases in this specifier - # or not, if we do not support prereleases than we can short circuit - # logic if this version is a prereleases. - if item.is_prerelease and not prereleases: - return False - - # Actually do the comparison to determine if this item is contained - # within this Specifier or not. - return self._get_operator(self.operator)(item, self.version) - - def filter(self, iterable, prereleases=None): - yielded = False - found_prereleases = [] - - kw = {"prereleases": prereleases if prereleases is not None else True} - - # Attempt to iterate over all the values in the iterable and if any of - # them match, yield them. - for version in iterable: - parsed_version = self._coerce_version(version) - - if self.contains(parsed_version, **kw): - # If our version is a prerelease, and we were not set to allow - # prereleases, then we'll store it for later incase nothing - # else matches this specifier. - if (parsed_version.is_prerelease and not - (prereleases or self.prereleases)): - found_prereleases.append(version) - # Either this is not a prerelease, or we should have been - # accepting prereleases from the begining. 
- else: - yielded = True - yield version - - # Now that we've iterated over everything, determine if we've yielded - # any values, and if we have not and we have any prereleases stored up - # then we will go ahead and yield the prereleases. - if not yielded and found_prereleases: - for version in found_prereleases: - yield version - - -class LegacySpecifier(_IndividualSpecifier): - - _regex_str = ( - r""" - (?P<operator>(==|!=|<=|>=|<|>)) - \s* - (?P<version> - [^,;\s)]* # Since this is a "legacy" specifier, and the version - # string can be just about anything, we match everything - # except for whitespace, a semi-colon for marker support, - # a closing paren since versions can be enclosed in - # them, and a comma since it's a version separator. - ) - """ - ) - - _regex = re.compile( - r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) - - _operators = { - "==": "equal", - "!=": "not_equal", - "<=": "less_than_equal", - ">=": "greater_than_equal", - "<": "less_than", - ">": "greater_than", - } - - def _coerce_version(self, version): - if not isinstance(version, LegacyVersion): - version = LegacyVersion(str(version)) - return version - - def _compare_equal(self, prospective, spec): - return prospective == self._coerce_version(spec) - - def _compare_not_equal(self, prospective, spec): - return prospective != self._coerce_version(spec) - - def _compare_less_than_equal(self, prospective, spec): - return prospective <= self._coerce_version(spec) - - def _compare_greater_than_equal(self, prospective, spec): - return prospective >= self._coerce_version(spec) - - def _compare_less_than(self, prospective, spec): - return prospective < self._coerce_version(spec) - - def _compare_greater_than(self, prospective, spec): - return prospective > self._coerce_version(spec) - - -def _require_version_compare(fn): - @functools.wraps(fn) - def wrapped(self, prospective, spec): - if not isinstance(prospective, Version): - return False - return fn(self, prospective, spec) - return wrapped - - -class Specifier(_IndividualSpecifier): - - _regex_str = ( - r""" - (?P<operator>(~=|==|!=|<=|>=|<|>|===)) - (?P<version> - (?: - # The identity operators allow for an escape hatch that will - # do an exact string match of the version you wish to install. - # This will not be parsed by PEP 440 and we cannot determine - # any semantic meaning from it. This operator is discouraged - # but included entirely as an escape hatch. - (?<====) # Only match for the identity operator - \s* - [^\s]* # We just match everything, except for whitespace - # since we are only testing for strict identity. - ) - | - (?: - # The (non)equality operators allow for wild card and local - # versions to be specified so we have to define these two - # operators separately to enable that. - (?<===|!=) # Only match for equals and not equals - - \s* - v? - (?:[0-9]+!)? # epoch - [0-9]+(?:\.[0-9]+)* # release - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - - # You cannot use a wild card and a dev or local version - # together so group them with a | and make them optional. - (?: - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local - | - \.\* # Wild card syntax of .* - )? - ) - | - (?: - # The compatible operator requires at least two digits in the - # release segment. - (?<=~=) # Only match for the compatible operator - - \s* - v? - (?:[0-9]+!)? 
# epoch - [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - ) - | - (?: - # All other operators only allow a sub set of what the - # (non)equality operators do. Specifically they do not allow - # local versions to be specified nor do they allow the prefix - # matching wild cards. - (?<!==|!=|~=) # We have special cases for these - # operators so we want to make sure they - # don't match here. - - \s* - v? - (?:[0-9]+!)? # epoch - [0-9]+(?:\.[0-9]+)* # release - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - ) - ) - """ - ) - - _regex = re.compile( - r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) - - _operators = { - "~=": "compatible", - "==": "equal", - "!=": "not_equal", - "<=": "less_than_equal", - ">=": "greater_than_equal", - "<": "less_than", - ">": "greater_than", - "===": "arbitrary", - } - - @_require_version_compare - def _compare_compatible(self, prospective, spec): - # Compatible releases have an equivalent combination of >= and ==. That - # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to - # implement this in terms of the other specifiers instead of - # implementing it ourselves. The only thing we need to do is construct - # the other specifiers. - - # We want everything but the last item in the version, but we want to - # ignore post and dev releases and we want to treat the pre-release as - # it's own separate segment. - prefix = ".".join( - list( - itertools.takewhile( - lambda x: (not x.startswith("post") and not - x.startswith("dev")), - _version_split(spec), - ) - )[:-1] - ) - - # Add the prefix notation to the end of our string - prefix += ".*" - - return (self._get_operator(">=")(prospective, spec) and - self._get_operator("==")(prospective, prefix)) - - @_require_version_compare - def _compare_equal(self, prospective, spec): - # We need special logic to handle prefix matching - if spec.endswith(".*"): - # In the case of prefix matching we want to ignore local segment. - prospective = Version(prospective.public) - # Split the spec out by dots, and pretend that there is an implicit - # dot in between a release segment and a pre-release segment. - spec = _version_split(spec[:-2]) # Remove the trailing .* - - # Split the prospective version out by dots, and pretend that there - # is an implicit dot in between a release segment and a pre-release - # segment. - prospective = _version_split(str(prospective)) - - # Shorten the prospective version to be the same length as the spec - # so that we can determine if the specifier is a prefix of the - # prospective version or not. - prospective = prospective[:len(spec)] - - # Pad out our two sides with zeros so that they both equal the same - # length. - spec, prospective = _pad_version(spec, prospective) - else: - # Convert our spec string into a Version - spec = Version(spec) - - # If the specifier does not have a local segment, then we want to - # act as if the prospective version also does not have a local - # segment. 
-            if not spec.local:
-                prospective = Version(prospective.public)
-
-        return prospective == spec
-
-    @_require_version_compare
-    def _compare_not_equal(self, prospective, spec):
-        return not self._compare_equal(prospective, spec)
-
-    @_require_version_compare
-    def _compare_less_than_equal(self, prospective, spec):
-        return prospective <= Version(spec)
-
-    @_require_version_compare
-    def _compare_greater_than_equal(self, prospective, spec):
-        return prospective >= Version(spec)
-
-    @_require_version_compare
-    def _compare_less_than(self, prospective, spec):
-        # Convert our spec to a Version instance, since we'll want to work with
-        # it as a version.
-        spec = Version(spec)
-
-        # Check to see if the prospective version is less than the spec
-        # version. If it's not we can short circuit and just return False now
-        # instead of doing extra unneeded work.
-        if not prospective < spec:
-            return False
-
-        # This special case is here so that, unless the specifier itself
-        # includes a pre-release version, we do not accept pre-release
-        # versions for the version mentioned in the specifier (e.g. <3.1 should
-        # not match 3.1.dev0, but should match 3.0.dev0).
-        if not spec.is_prerelease and prospective.is_prerelease:
-            if Version(prospective.base_version) == Version(spec.base_version):
-                return False
-
-        # If we've gotten to here, it means that prospective version is both
-        # less than the spec version *and* it's not a pre-release of the same
-        # version in the spec.
-        return True
-
-    @_require_version_compare
-    def _compare_greater_than(self, prospective, spec):
-        # Convert our spec to a Version instance, since we'll want to work with
-        # it as a version.
-        spec = Version(spec)
-
-        # Check to see if the prospective version is greater than the spec
-        # version. If it's not we can short circuit and just return False now
-        # instead of doing extra unneeded work.
-        if not prospective > spec:
-            return False
-
-        # This special case is here so that, unless the specifier itself
-        # includes a post-release version, we do not accept
-        # post-release versions for the version mentioned in the specifier
-        # (e.g. >3.1 should not match 3.1.post0, but should match 3.2.post0).
-        if not spec.is_postrelease and prospective.is_postrelease:
-            if Version(prospective.base_version) == Version(spec.base_version):
-                return False
-
-        # Ensure that we do not allow a local version of the version mentioned
-        # in the specifier, which is technically greater than, to match.
-        if prospective.local is not None:
-            if Version(prospective.base_version) == Version(spec.base_version):
-                return False
-
-        # If we've gotten to here, it means that prospective version is both
-        # greater than the spec version *and* it's not a post-release of the
-        # same version in the spec.
-        return True
-
-    def _compare_arbitrary(self, prospective, spec):
-        return str(prospective).lower() == str(spec).lower()
-
-    @property
-    def prereleases(self):
-        # If there is an explicit prereleases set for this, then we'll just
-        # blindly use that.
-        if self._prereleases is not None:
-            return self._prereleases
-
-        # Look at all of our specifiers and determine if they are inclusive
-        # operators, and if they are if they are including an explicit
-        # prerelease.
-        operator, version = self._spec
-        if operator in ["==", ">=", "<=", "~=", "==="]:
-            # The == specifier can include a trailing .*; if it does, we
-            # want to remove it before parsing.
- if operator == "==" and version.endswith(".*"): - version = version[:-2] - - # Parse the version, and if it is a pre-release than this - # specifier allows pre-releases. - if parse(version).is_prerelease: - return True - - return False - - @prereleases.setter - def prereleases(self, value): - self._prereleases = value - - -_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") - - -def _version_split(version): - result = [] - for item in version.split("."): - match = _prefix_regex.search(item) - if match: - result.extend(match.groups()) - else: - result.append(item) - return result - - -def _pad_version(left, right): - left_split, right_split = [], [] - - # Get the release segment of our versions - left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) - right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) - - # Get the rest of our versions - left_split.append(left[len(left_split[0]):]) - right_split.append(right[len(right_split[0]):]) - - # Insert our padding - left_split.insert( - 1, - ["0"] * max(0, len(right_split[0]) - len(left_split[0])), - ) - right_split.insert( - 1, - ["0"] * max(0, len(left_split[0]) - len(right_split[0])), - ) - - return ( - list(itertools.chain(*left_split)), - list(itertools.chain(*right_split)), - ) - - -class SpecifierSet(BaseSpecifier): - - def __init__(self, specifiers="", prereleases=None): - # Split on , to break each indidivual specifier into it's own item, and - # strip each item to remove leading/trailing whitespace. - specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] - - # Parsed each individual specifier, attempting first to make it a - # Specifier and falling back to a LegacySpecifier. - parsed = set() - for specifier in specifiers: - try: - parsed.add(Specifier(specifier)) - except InvalidSpecifier: - parsed.add(LegacySpecifier(specifier)) - - # Turn our parsed specifiers into a frozen set and save them for later. - self._specs = frozenset(parsed) - - # Store our prereleases value so we can use it later to determine if - # we accept prereleases or not. - self._prereleases = prereleases - - def __repr__(self): - pre = ( - ", prereleases={0!r}".format(self.prereleases) - if self._prereleases is not None - else "" - ) - - return "<SpecifierSet({0!r}{1})>".format(str(self), pre) - - def __str__(self): - return ",".join(sorted(str(s) for s in self._specs)) - - def __hash__(self): - return hash(self._specs) - - def __and__(self, other): - if isinstance(other, string_types): - other = SpecifierSet(other) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - specifier = SpecifierSet() - specifier._specs = frozenset(self._specs | other._specs) - - if self._prereleases is None and other._prereleases is not None: - specifier._prereleases = other._prereleases - elif self._prereleases is not None and other._prereleases is None: - specifier._prereleases = self._prereleases - elif self._prereleases == other._prereleases: - specifier._prereleases = self._prereleases - else: - raise ValueError( - "Cannot combine SpecifierSets with True and False prerelease " - "overrides." 
- ) - - return specifier - - def __eq__(self, other): - if isinstance(other, string_types): - other = SpecifierSet(other) - elif isinstance(other, _IndividualSpecifier): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs == other._specs - - def __ne__(self, other): - if isinstance(other, string_types): - other = SpecifierSet(other) - elif isinstance(other, _IndividualSpecifier): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs != other._specs - - def __len__(self): - return len(self._specs) - - def __iter__(self): - return iter(self._specs) - - @property - def prereleases(self): - # If we have been given an explicit prerelease modifier, then we'll - # pass that through here. - if self._prereleases is not None: - return self._prereleases - - # If we don't have any specifiers, and we don't have a forced value, - # then we'll just return None since we don't know if this should have - # pre-releases or not. - if not self._specs: - return None - - # Otherwise we'll see if any of the given specifiers accept - # prereleases, if any of them do we'll return True, otherwise False. - return any(s.prereleases for s in self._specs) - - @prereleases.setter - def prereleases(self, value): - self._prereleases = value - - def __contains__(self, item): - return self.contains(item) - - def contains(self, item, prereleases=None): - # Ensure that our item is a Version or LegacyVersion instance. - if not isinstance(item, (LegacyVersion, Version)): - item = parse(item) - - # Determine if we're forcing a prerelease or not, if we're not forcing - # one for this particular filter call, then we'll use whatever the - # SpecifierSet thinks for whether or not we should support prereleases. - if prereleases is None: - prereleases = self.prereleases - - # We can determine if we're going to allow pre-releases by looking to - # see if any of the underlying items supports them. If none of them do - # and this item is a pre-release then we do not allow it and we can - # short circuit that here. - # Note: This means that 1.0.dev1 would not be contained in something - # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 - if not prereleases and item.is_prerelease: - return False - - # We simply dispatch to the underlying specs here to make sure that the - # given version is contained within all of them. - # Note: This use of all() here means that an empty set of specifiers - # will always return True, this is an explicit design decision. - return all( - s.contains(item, prereleases=prereleases) - for s in self._specs - ) - - def filter(self, iterable, prereleases=None): - # Determine if we're forcing a prerelease or not, if we're not forcing - # one for this particular filter call, then we'll use whatever the - # SpecifierSet thinks for whether or not we should support prereleases. - if prereleases is None: - prereleases = self.prereleases - - # If we have any specifiers, then we want to wrap our iterable in the - # filter method for each one, this will act as a logical AND amongst - # each specifier. - if self._specs: - for spec in self._specs: - iterable = spec.filter(iterable, prereleases=bool(prereleases)) - return iterable - # If we do not have any specifiers, then we need to have a rough filter - # which will filter out any pre-releases, unless there are no final - # releases, and which will filter out LegacyVersion in general. 
-        else:
-            filtered = []
-            found_prereleases = []
-
-            for item in iterable:
-                # Ensure that we have some kind of Version class for this item.
-                if not isinstance(item, (LegacyVersion, Version)):
-                    parsed_version = parse(item)
-                else:
-                    parsed_version = item
-
-                # Filter out any item which is parsed as a LegacyVersion
-                if isinstance(parsed_version, LegacyVersion):
-                    continue
-
-                # Store any item which is a pre-release for later unless we've
-                # already found a final version or we are accepting prereleases
-                if parsed_version.is_prerelease and not prereleases:
-                    if not filtered:
-                        found_prereleases.append(item)
-                else:
-                    filtered.append(item)
-
-            # If we've found no items except for pre-releases, then we'll go
-            # ahead and use the pre-releases
-            if not filtered and found_prereleases and prereleases is None:
-                return found_prereleases
-
-            return filtered
diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/utils.py b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/utils.py
deleted file mode 100644
index 942387c..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/utils.py
+++ /dev/null
@@ -1,14 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-from __future__ import absolute_import, division, print_function
-
-import re
-
-
-_canonicalize_regex = re.compile(r"[-_.]+")
-
-
-def canonicalize_name(name):
-    # This is taken from PEP 503.
-    return _canonicalize_regex.sub("-", name).lower()
diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/version.py b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/version.py
deleted file mode 100644
index 83b5ee8..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/packaging/version.py
+++ /dev/null
@@ -1,393 +0,0 @@
-# This file is dual licensed under the terms of the Apache License, Version
-# 2.0, and the BSD License. See the LICENSE file in the root of this repository
-# for complete details.
-from __future__ import absolute_import, division, print_function
-
-import collections
-import itertools
-import re
-
-from ._structures import Infinity
-
-
-__all__ = [
-    "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"
-]
-
-
-_Version = collections.namedtuple(
-    "_Version",
-    ["epoch", "release", "dev", "pre", "post", "local"],
-)
-
-
-def parse(version):
-    """
-    Parse the given version string and return either a :class:`Version` object
-    or a :class:`LegacyVersion` object depending on whether the given version
-    is a valid PEP 440 version or a legacy version.
-    """
-    try:
-        return Version(version)
-    except InvalidVersion:
-        return LegacyVersion(version)
-
-
-class InvalidVersion(ValueError):
-    """
-    An invalid version was found, users should refer to PEP 440.
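To make the behaviour of the specifiers.py module deleted above concrete, a sketch (standalone packaging distribution assumed; the specifier string is an arbitrary example):

    # Sketch: matching versions against a specifier set.
    from packaging.specifiers import SpecifierSet

    spec = SpecifierSet(">=1.0,!=1.1")
    print("1.4" in spec)         # True
    print(spec.contains("1.1"))  # False
    # Pre-releases are excluded unless the set explicitly allows them:
    print(list(spec.filter(["0.9", "1.0", "1.1", "1.2", "2.0.dev1"])))  # ['1.0', '1.2']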
- """ - - -class _BaseVersion(object): - - def __hash__(self): - return hash(self._key) - - def __lt__(self, other): - return self._compare(other, lambda s, o: s < o) - - def __le__(self, other): - return self._compare(other, lambda s, o: s <= o) - - def __eq__(self, other): - return self._compare(other, lambda s, o: s == o) - - def __ge__(self, other): - return self._compare(other, lambda s, o: s >= o) - - def __gt__(self, other): - return self._compare(other, lambda s, o: s > o) - - def __ne__(self, other): - return self._compare(other, lambda s, o: s != o) - - def _compare(self, other, method): - if not isinstance(other, _BaseVersion): - return NotImplemented - - return method(self._key, other._key) - - -class LegacyVersion(_BaseVersion): - - def __init__(self, version): - self._version = str(version) - self._key = _legacy_cmpkey(self._version) - - def __str__(self): - return self._version - - def __repr__(self): - return "<LegacyVersion({0})>".format(repr(str(self))) - - @property - def public(self): - return self._version - - @property - def base_version(self): - return self._version - - @property - def local(self): - return None - - @property - def is_prerelease(self): - return False - - @property - def is_postrelease(self): - return False - - -_legacy_version_component_re = re.compile( - r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE, -) - -_legacy_version_replacement_map = { - "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", -} - - -def _parse_version_parts(s): - for part in _legacy_version_component_re.split(s): - part = _legacy_version_replacement_map.get(part, part) - - if not part or part == ".": - continue - - if part[:1] in "0123456789": - # pad for numeric comparison - yield part.zfill(8) - else: - yield "*" + part - - # ensure that alpha/beta/candidate are before final - yield "*final" - - -def _legacy_cmpkey(version): - # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch - # greater than or equal to 0. This will effectively put the LegacyVersion, - # which uses the defacto standard originally implemented by setuptools, - # as before all PEP 440 versions. - epoch = -1 - - # This scheme is taken from pkg_resources.parse_version setuptools prior to - # it's adoption of the packaging library. - parts = [] - for part in _parse_version_parts(version.lower()): - if part.startswith("*"): - # remove "-" before a prerelease tag - if part < "*final": - while parts and parts[-1] == "*final-": - parts.pop() - - # remove trailing zeros from each series of numeric parts - while parts and parts[-1] == "00000000": - parts.pop() - - parts.append(part) - parts = tuple(parts) - - return epoch, parts - -# Deliberately not anchored to the start and end of the string, to make it -# easier for 3rd party code to reuse -VERSION_PATTERN = r""" - v? - (?: - (?:(?P<epoch>[0-9]+)!)? # epoch - (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment - (?P<pre> # pre-release - [-_\.]? - (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview)) - [-_\.]? - (?P<pre_n>[0-9]+)? - )? - (?P<post> # post release - (?:-(?P<post_n1>[0-9]+)) - | - (?: - [-_\.]? - (?P<post_l>post|rev|r) - [-_\.]? - (?P<post_n2>[0-9]+)? - ) - )? - (?P<dev> # dev release - [-_\.]? - (?P<dev_l>dev) - [-_\.]? - (?P<dev_n>[0-9]+)? - )? - ) - (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? 
# local version -""" - - -class Version(_BaseVersion): - - _regex = re.compile( - r"^\s*" + VERSION_PATTERN + r"\s*$", - re.VERBOSE | re.IGNORECASE, - ) - - def __init__(self, version): - # Validate the version and parse it into pieces - match = self._regex.search(version) - if not match: - raise InvalidVersion("Invalid version: '{0}'".format(version)) - - # Store the parsed out pieces of the version - self._version = _Version( - epoch=int(match.group("epoch")) if match.group("epoch") else 0, - release=tuple(int(i) for i in match.group("release").split(".")), - pre=_parse_letter_version( - match.group("pre_l"), - match.group("pre_n"), - ), - post=_parse_letter_version( - match.group("post_l"), - match.group("post_n1") or match.group("post_n2"), - ), - dev=_parse_letter_version( - match.group("dev_l"), - match.group("dev_n"), - ), - local=_parse_local_version(match.group("local")), - ) - - # Generate a key which will be used for sorting - self._key = _cmpkey( - self._version.epoch, - self._version.release, - self._version.pre, - self._version.post, - self._version.dev, - self._version.local, - ) - - def __repr__(self): - return "<Version({0})>".format(repr(str(self))) - - def __str__(self): - parts = [] - - # Epoch - if self._version.epoch != 0: - parts.append("{0}!".format(self._version.epoch)) - - # Release segment - parts.append(".".join(str(x) for x in self._version.release)) - - # Pre-release - if self._version.pre is not None: - parts.append("".join(str(x) for x in self._version.pre)) - - # Post-release - if self._version.post is not None: - parts.append(".post{0}".format(self._version.post[1])) - - # Development release - if self._version.dev is not None: - parts.append(".dev{0}".format(self._version.dev[1])) - - # Local version segment - if self._version.local is not None: - parts.append( - "+{0}".format(".".join(str(x) for x in self._version.local)) - ) - - return "".join(parts) - - @property - def public(self): - return str(self).split("+", 1)[0] - - @property - def base_version(self): - parts = [] - - # Epoch - if self._version.epoch != 0: - parts.append("{0}!".format(self._version.epoch)) - - # Release segment - parts.append(".".join(str(x) for x in self._version.release)) - - return "".join(parts) - - @property - def local(self): - version_string = str(self) - if "+" in version_string: - return version_string.split("+", 1)[1] - - @property - def is_prerelease(self): - return bool(self._version.dev or self._version.pre) - - @property - def is_postrelease(self): - return bool(self._version.post) - - -def _parse_letter_version(letter, number): - if letter: - # We consider there to be an implicit 0 in a pre-release if there is - # not a numeral associated with it. - if number is None: - number = 0 - - # We normalize any letters to their lower case form - letter = letter.lower() - - # We consider some words to be alternate spellings of other words and - # in those cases we want to normalize the spellings to our preferred - # spelling. - if letter == "alpha": - letter = "a" - elif letter == "beta": - letter = "b" - elif letter in ["c", "pre", "preview"]: - letter = "rc" - elif letter in ["rev", "r"]: - letter = "post" - - return letter, int(number) - if not letter and number: - # We assume if we are given a number, but we are not given a letter - # then this is using the implicit post release syntax (e.g. 
1.0-1) - letter = "post" - - return letter, int(number) - - -_local_version_seperators = re.compile(r"[\._-]") - - -def _parse_local_version(local): - """ - Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). - """ - if local is not None: - return tuple( - part.lower() if not part.isdigit() else int(part) - for part in _local_version_seperators.split(local) - ) - - -def _cmpkey(epoch, release, pre, post, dev, local): - # When we compare a release version, we want to compare it with all of the - # trailing zeros removed. So we'll use a reverse the list, drop all the now - # leading zeros until we come to something non zero, then take the rest - # re-reverse it back into the correct order and make it a tuple and use - # that for our sorting key. - release = tuple( - reversed(list( - itertools.dropwhile( - lambda x: x == 0, - reversed(release), - ) - )) - ) - - # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0. - # We'll do this by abusing the pre segment, but we _only_ want to do this - # if there is not a pre or a post segment. If we have one of those then - # the normal sorting rules will handle this case correctly. - if pre is None and post is None and dev is not None: - pre = -Infinity - # Versions without a pre-release (except as noted above) should sort after - # those with one. - elif pre is None: - pre = Infinity - - # Versions without a post segment should sort before those with one. - if post is None: - post = -Infinity - - # Versions without a development segment should sort after those with one. - if dev is None: - dev = Infinity - - if local is None: - # Versions without a local segment should sort before those with one. - local = -Infinity - else: - # Versions with a local segment need that segment parsed to implement - # the sorting rules in PEP440. - # - Alpha numeric segments sort before numeric segments - # - Alpha numeric segments sort lexicographically - # - Numeric segments sort numerically - # - Shorter versions sort before longer versions when the prefixes - # match exactly - local = tuple( - (i, "") if isinstance(i, int) else (-Infinity, i) - for i in local - ) - - return epoch, release, pre, post, dev, local diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/pyparsing.py b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/pyparsing.py deleted file mode 100644 index 38a8d2f..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/pyparsing.py +++ /dev/null @@ -1,5696 +0,0 @@ -# module pyparsing.py -# -# Copyright (c) 2003-2016 Paul T. McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# - -__doc__ = \ -""" -pyparsing module - Classes and methods to define and execute parsing grammars - -The pyparsing module is an alternative approach to creating and executing simple grammars, -vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you -don't need to learn a new syntax for defining grammars or matching expressions - the parsing module -provides a library of classes that you use to construct the grammar directly in Python. - -Here is a program to parse "Hello, World!" (or any greeting of the form -C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements -(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to -L{Literal} expressions):: - - from pyparsing import Word, alphas - - # define grammar of a greeting - greet = Word(alphas) + "," + Word(alphas) + "!" - - hello = "Hello, World!" - print (hello, "->", greet.parseString(hello)) - -The program outputs the following:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - -The Python representation of the grammar is quite readable, owing to the self-explanatory -class names, and the use of '+', '|' and '^' operators. - -The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an -object with named attributes. - -The pyparsing module handles some of the problems that are typically vexing when writing text parsers: - - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) 
- - quoted strings - - embedded comments -""" - -__version__ = "2.1.10" -__versionTime__ = "07 Oct 2016 01:31 UTC" -__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>" - -import string -from weakref import ref as wkref -import copy -import sys -import warnings -import re -import sre_constants -import collections -import pprint -import traceback -import types -from datetime import datetime - -try: - from _thread import RLock -except ImportError: - from threading import RLock - -try: - from collections import OrderedDict as _OrderedDict -except ImportError: - try: - from ordereddict import OrderedDict as _OrderedDict - except ImportError: - _OrderedDict = None - -#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) - -__all__ = [ -'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', -'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', -'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', -'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', -'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', -'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', -'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', -'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', -'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', -'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', -'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', -'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', -'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', -'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', -'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', -'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', -'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass', -'CloseMatch', 'tokenMap', 'pyparsing_common', -] - -system_version = tuple(sys.version_info)[:3] -PY_3 = system_version[0] == 3 -if PY_3: - _MAX_INT = sys.maxsize - basestring = str - unichr = chr - _ustr = str - - # build list of single arg builtins, that can be used as parse actions - singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max] - -else: - _MAX_INT = sys.maxint - range = xrange - - def _ustr(obj): - """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries - str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It - then < returns the unicode object | encodes it with the default encoding | ... >. - """ - if isinstance(obj,unicode): - return obj - - try: - # If this works, then _ustr(obj) has the same behaviour as str(obj), so - # it won't break any existing code. 
- return str(obj) - - except UnicodeEncodeError: - # Else encode it - ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace') - xmlcharref = Regex(r'&#\d+;') - xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:]) - return xmlcharref.transformString(ret) - - # build list of single arg builtins, tolerant of Python version, that can be used as parse actions - singleArgBuiltins = [] - import __builtin__ - for fname in "sum len sorted reversed list tuple set any all min max".split(): - try: - singleArgBuiltins.append(getattr(__builtin__,fname)) - except AttributeError: - continue - -_generatorType = type((y for y in range(1))) - -def _xml_escape(data): - """Escape &, <, >, ", ', etc. in a string of data.""" - - # ampersand must be replaced first - from_symbols = '&><"\'' - to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split()) - for from_,to_ in zip(from_symbols, to_symbols): - data = data.replace(from_, to_) - return data - -class _Constants(object): - pass - -alphas = string.ascii_uppercase + string.ascii_lowercase -nums = "0123456789" -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -_bslash = chr(92) -printables = "".join(c for c in string.printable if c not in string.whitespace) - -class ParseBaseException(Exception): - """base exception class for all parsing runtime exceptions""" - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( self, pstr, loc=0, msg=None, elem=None ): - self.loc = loc - if msg is None: - self.msg = pstr - self.pstr = "" - else: - self.msg = msg - self.pstr = pstr - self.parserElement = elem - self.args = (pstr, loc, msg) - - @classmethod - def _from_exception(cls, pe): - """ - internal factory method to simplify creating one type of ParseException - from another - avoids having __init__ signature conflicts among subclasses - """ - return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) - - def __getattr__( self, aname ): - """supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - """ - if( aname == "lineno" ): - return lineno( self.loc, self.pstr ) - elif( aname in ("col", "column") ): - return col( self.loc, self.pstr ) - elif( aname == "line" ): - return line( self.loc, self.pstr ) - else: - raise AttributeError(aname) - - def __str__( self ): - return "%s (at char %d), (line:%d, col:%d)" % \ - ( self.msg, self.loc, self.lineno, self.column ) - def __repr__( self ): - return _ustr(self) - def markInputline( self, markerString = ">!<" ): - """Extracts the exception line from the input string, and marks - the location of the exception with a special symbol. 
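-
-        An illustrative sketch of typical usage (not from the original docstring;
-        output traced from the logic below)::
-
-            try:
-                Word(nums).setName("integer").parseString("ABC")
-            except ParseException as pe:
-                print(pe.markInputline())   # -> '>!<ABC'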
- """ - line_str = self.line - line_column = self.column - 1 - if markerString: - line_str = "".join((line_str[:line_column], - markerString, line_str[line_column:])) - return line_str.strip() - def __dir__(self): - return "lineno col line".split() + dir(type(self)) - -class ParseException(ParseBaseException): - """ - Exception thrown when parse expressions don't match class; - supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - - Example:: - try: - Word(nums).setName("integer").parseString("ABC") - except ParseException as pe: - print(pe) - print("column: {}".format(pe.col)) - - prints:: - Expected integer (at char 0), (line:1, col:1) - column: 1 - """ - pass - -class ParseFatalException(ParseBaseException): - """user-throwable exception thrown when inconsistent parse content - is found; stops all parsing immediately""" - pass - -class ParseSyntaxException(ParseFatalException): - """just like L{ParseFatalException}, but thrown internally when an - L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop - immediately because an unbacktrackable syntax error has been found""" - pass - -#~ class ReparseException(ParseBaseException): - #~ """Experimental class - parse actions can raise this exception to cause - #~ pyparsing to reparse the input string: - #~ - with a modified input string, and/or - #~ - with a modified start location - #~ Set the values of the ReparseException in the constructor, and raise the - #~ exception in a parse action to cause pyparsing to use the new string/location. - #~ Setting the values as None causes no change to be made. - #~ """ - #~ def __init_( self, newstring, restartLoc ): - #~ self.newParseText = newstring - #~ self.reparseLoc = restartLoc - -class RecursiveGrammarException(Exception): - """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive""" - def __init__( self, parseElementList ): - self.parseElementTrace = parseElementList - - def __str__( self ): - return "RecursiveGrammarException: %s" % self.parseElementTrace - -class _ParseResultsWithOffset(object): - def __init__(self,p1,p2): - self.tup = (p1,p2) - def __getitem__(self,i): - return self.tup[i] - def __repr__(self): - return repr(self.tup[0]) - def setOffset(self,i): - self.tup = (self.tup[0],i) - -class ParseResults(object): - """ - Structured parse results, to provide multiple means of access to the parsed data: - - as a list (C{len(results)}) - - by list index (C{results[0], results[1]}, etc.) 
- - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName}) - - Example:: - integer = Word(nums) - date_str = (integer.setResultsName("year") + '/' - + integer.setResultsName("month") + '/' - + integer.setResultsName("day")) - # equivalent form: - # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - # parseString returns a ParseResults object - result = date_str.parseString("1999/12/31") - - def test(s, fn=repr): - print("%s -> %s" % (s, fn(eval(s)))) - test("list(result)") - test("result[0]") - test("result['month']") - test("result.day") - test("'month' in result") - test("'minutes' in result") - test("result.dump()", str) - prints:: - list(result) -> ['1999', '/', '12', '/', '31'] - result[0] -> '1999' - result['month'] -> '12' - result.day -> '31' - 'month' in result -> True - 'minutes' in result -> False - result.dump() -> ['1999', '/', '12', '/', '31'] - - day: 31 - - month: 12 - - year: 1999 - """ - def __new__(cls, toklist=None, name=None, asList=True, modal=True ): - if isinstance(toklist, cls): - return toklist - retobj = object.__new__(cls) - retobj.__doinit = True - return retobj - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ): - if self.__doinit: - self.__doinit = False - self.__name = None - self.__parent = None - self.__accumNames = {} - self.__asList = asList - self.__modal = modal - if toklist is None: - toklist = [] - if isinstance(toklist, list): - self.__toklist = toklist[:] - elif isinstance(toklist, _generatorType): - self.__toklist = list(toklist) - else: - self.__toklist = [toklist] - self.__tokdict = dict() - - if name is not None and name: - if not modal: - self.__accumNames[name] = 0 - if isinstance(name,int): - name = _ustr(name) # will always return a str, but use _ustr for consistency - self.__name = name - if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])): - if isinstance(toklist,basestring): - toklist = [ toklist ] - if asList: - if isinstance(toklist,ParseResults): - self[name] = _ParseResultsWithOffset(toklist.copy(),0) - else: - self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0) - self[name].__name = name - else: - try: - self[name] = toklist[0] - except (KeyError,TypeError,IndexError): - self[name] = toklist - - def __getitem__( self, i ): - if isinstance( i, (int,slice) ): - return self.__toklist[i] - else: - if i not in self.__accumNames: - return self.__tokdict[i][-1][0] - else: - return ParseResults([ v[0] for v in self.__tokdict[i] ]) - - def __setitem__( self, k, v, isinstance=isinstance ): - if isinstance(v,_ParseResultsWithOffset): - self.__tokdict[k] = self.__tokdict.get(k,list()) + [v] - sub = v[0] - elif isinstance(k,(int,slice)): - self.__toklist[k] = v - sub = v - else: - self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)] - sub = v - if isinstance(sub,ParseResults): - sub.__parent = wkref(self) - - def __delitem__( self, i ): - if isinstance(i,(int,slice)): - mylen = len( self.__toklist ) - del self.__toklist[i] - - # convert int to slice - if isinstance(i, int): - if i < 0: - i += mylen - i = slice(i, i+1) - # get removed indices - removed = list(range(*i.indices(mylen))) - removed.reverse() - # fixup indices in token dictionary - for name,occurrences in self.__tokdict.items(): - for j in removed: - for k, (value, position) in 
enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) - else: - del self.__tokdict[i] - - def __contains__( self, k ): - return k in self.__tokdict - - def __len__( self ): return len( self.__toklist ) - def __bool__(self): return ( not not self.__toklist ) - __nonzero__ = __bool__ - def __iter__( self ): return iter( self.__toklist ) - def __reversed__( self ): return iter( self.__toklist[::-1] ) - def _iterkeys( self ): - if hasattr(self.__tokdict, "iterkeys"): - return self.__tokdict.iterkeys() - else: - return iter(self.__tokdict) - - def _itervalues( self ): - return (self[k] for k in self._iterkeys()) - - def _iteritems( self ): - return ((k, self[k]) for k in self._iterkeys()) - - if PY_3: - keys = _iterkeys - """Returns an iterator of all named result keys (Python 3.x only).""" - - values = _itervalues - """Returns an iterator of all named result values (Python 3.x only).""" - - items = _iteritems - """Returns an iterator of all named result key-value tuples (Python 3.x only).""" - - else: - iterkeys = _iterkeys - """Returns an iterator of all named result keys (Python 2.x only).""" - - itervalues = _itervalues - """Returns an iterator of all named result values (Python 2.x only).""" - - iteritems = _iteritems - """Returns an iterator of all named result key-value tuples (Python 2.x only).""" - - def keys( self ): - """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).""" - return list(self.iterkeys()) - - def values( self ): - """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" - return list(self.itervalues()) - - def items( self ): - """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" - return list(self.iteritems()) - - def haskeys( self ): - """Since keys() returns an iterator, this method is helpful in bypassing - code that looks for the existence of any defined results names.""" - return bool(self.__tokdict) - - def pop( self, *args, **kwargs): - """ - Removes and returns item at specified index (default=C{last}). - Supports both C{list} and C{dict} semantics for C{pop()}. If passed no - argument or an integer argument, it will use C{list} semantics - and pop tokens from the list of parsed tokens. If passed a - non-integer argument (most likely a string), it will use C{dict} - semantics and pop the corresponding value from any defined - results names. A second default return value argument is - supported, just as in C{dict.pop()}. 
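-
-        A supplementary sketch of the C{dict}-style default (the names C{"missing"}
-        and C{"fallback"} are made up for illustration)::
-
-            result = (Word(alphas)("word") + Word(nums)).parseString("abc 123")
-            print(result.pop("missing", "fallback"))   # no such results name -> 'fallback'
-            print(result.pop())                        # list semantics -> '123'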
- - Example:: - def remove_first(tokens): - tokens.pop(0) - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] - - label = Word(alphas) - patt = label("LABEL") + OneOrMore(Word(nums)) - print(patt.parseString("AAB 123 321").dump()) - - # Use pop() in a parse action to remove named result (note that corresponding value is not - # removed from list form of results) - def remove_LABEL(tokens): - tokens.pop("LABEL") - return tokens - patt.addParseAction(remove_LABEL) - print(patt.parseString("AAB 123 321").dump()) - prints:: - ['AAB', '123', '321'] - - LABEL: AAB - - ['AAB', '123', '321'] - """ - if not args: - args = [-1] - for k,v in kwargs.items(): - if k == 'default': - args = (args[0], v) - else: - raise TypeError("pop() got an unexpected keyword argument '%s'" % k) - if (isinstance(args[0], int) or - len(args) == 1 or - args[0] in self): - index = args[0] - ret = self[index] - del self[index] - return ret - else: - defaultvalue = args[1] - return defaultvalue - - def get(self, key, defaultValue=None): - """ - Returns named result matching the given key, or if there is no - such name, then returns the given C{defaultValue} or C{None} if no - C{defaultValue} is specified. - - Similar to C{dict.get()}. - - Example:: - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString("1999/12/31") - print(result.get("year")) # -> '1999' - print(result.get("hour", "not specified")) # -> 'not specified' - print(result.get("hour")) # -> None - """ - if key in self: - return self[key] - else: - return defaultValue - - def insert( self, index, insStr ): - """ - Inserts new element at location index in the list of parsed tokens. - - Similar to C{list.insert()}. - - Example:: - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to insert the parse location in the front of the parsed results - def insert_locn(locn, tokens): - tokens.insert(0, locn) - print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] - """ - self.__toklist.insert(index, insStr) - # fixup indices in token dictionary - for name,occurrences in self.__tokdict.items(): - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) - - def append( self, item ): - """ - Add single element to end of ParseResults list of elements. - - Example:: - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to compute the sum of the parsed integers, and add it to the end - def append_sum(tokens): - tokens.append(sum(map(int, tokens))) - print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] - """ - self.__toklist.append(item) - - def extend( self, itemseq ): - """ - Add sequence of elements to end of ParseResults list of elements. 
- - Example:: - patt = OneOrMore(Word(alphas)) - - # use a parse action to append the reverse of the matched strings, to make a palindrome - def make_palindrome(tokens): - tokens.extend(reversed([t[::-1] for t in tokens])) - return ''.join(tokens) - print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' - """ - if isinstance(itemseq, ParseResults): - self += itemseq - else: - self.__toklist.extend(itemseq) - - def clear( self ): - """ - Clear all elements and results names. - """ - del self.__toklist[:] - self.__tokdict.clear() - - def __getattr__( self, name ): - try: - return self[name] - except KeyError: - return "" - - if name in self.__tokdict: - if name not in self.__accumNames: - return self.__tokdict[name][-1][0] - else: - return ParseResults([ v[0] for v in self.__tokdict[name] ]) - else: - return "" - - def __add__( self, other ): - ret = self.copy() - ret += other - return ret - - def __iadd__( self, other ): - if other.__tokdict: - offset = len(self.__toklist) - addoffset = lambda a: offset if a<0 else a+offset - otheritems = other.__tokdict.items() - otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) ) - for (k,vlist) in otheritems for v in vlist] - for k,v in otherdictitems: - self[k] = v - if isinstance(v[0],ParseResults): - v[0].__parent = wkref(self) - - self.__toklist += other.__toklist - self.__accumNames.update( other.__accumNames ) - return self - - def __radd__(self, other): - if isinstance(other,int) and other == 0: - # useful for merging many ParseResults using sum() builtin - return self.copy() - else: - # this may raise a TypeError - so be it - return other + self - - def __repr__( self ): - return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) - - def __str__( self ): - return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']' - - def _asStringList( self, sep='' ): - out = [] - for item in self.__toklist: - if out and sep: - out.append(sep) - if isinstance( item, ParseResults ): - out += item._asStringList() - else: - out.append( _ustr(item) ) - return out - - def asList( self ): - """ - Returns the parse results as a nested list of matching tokens, all converted to strings. - - Example:: - patt = OneOrMore(Word(alphas)) - result = patt.parseString("sldkj lsdkj sldkj") - # even though the result prints in string-like form, it is actually a pyparsing ParseResults - print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj'] - - # Use asList() to create an actual list - result_list = result.asList() - print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj'] - """ - return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist] - - def asDict( self ): - """ - Returns the named parse results as a nested dictionary. 
- - Example:: - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString('12/31/1999') - print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) - - result_dict = result.asDict() - print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} - - # even though a ParseResults supports dict-like access, sometime you just need to have a dict - import json - print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable - print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} - """ - if PY_3: - item_fn = self.items - else: - item_fn = self.iteritems - - def toItem(obj): - if isinstance(obj, ParseResults): - if obj.haskeys(): - return obj.asDict() - else: - return [toItem(v) for v in obj] - else: - return obj - - return dict((k,toItem(v)) for k,v in item_fn()) - - def copy( self ): - """ - Returns a new copy of a C{ParseResults} object. - """ - ret = ParseResults( self.__toklist ) - ret.__tokdict = self.__tokdict.copy() - ret.__parent = self.__parent - ret.__accumNames.update( self.__accumNames ) - ret.__name = self.__name - return ret - - def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): - """ - (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. - """ - nl = "\n" - out = [] - namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items() - for v in vlist) - nextLevelIndent = indent + " " - - # collapse out indents if formatting is not desired - if not formatted: - indent = "" - nextLevelIndent = "" - nl = "" - - selfTag = None - if doctag is not None: - selfTag = doctag - else: - if self.__name: - selfTag = self.__name - - if not selfTag: - if namedItemsOnly: - return "" - else: - selfTag = "ITEM" - - out += [ nl, indent, "<", selfTag, ">" ] - - for i,res in enumerate(self.__toklist): - if isinstance(res,ParseResults): - if i in namedItems: - out += [ res.asXML(namedItems[i], - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - out += [ res.asXML(None, - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - # individual token, see if there is a name for it - resTag = None - if i in namedItems: - resTag = namedItems[i] - if not resTag: - if namedItemsOnly: - continue - else: - resTag = "ITEM" - xmlBodyText = _xml_escape(_ustr(res)) - out += [ nl, nextLevelIndent, "<", resTag, ">", - xmlBodyText, - "</", resTag, ">" ] - - out += [ nl, indent, "</", selfTag, ">" ] - return "".join(out) - - def __lookup(self,sub): - for k,vlist in self.__tokdict.items(): - for v,loc in vlist: - if sub is v: - return k - return None - - def getName(self): - r""" - Returns the results name for this token expression. Useful when several - different expressions might match at a particular location. 
- - Example:: - integer = Word(nums) - ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") - house_number_expr = Suppress('#') + Word(nums, alphanums) - user_data = (Group(house_number_expr)("house_number") - | Group(ssn_expr)("ssn") - | Group(integer)("age")) - user_info = OneOrMore(user_data) - - result = user_info.parseString("22 111-22-3333 #221B") - for item in result: - print(item.getName(), ':', item[0]) - prints:: - age : 22 - ssn : 111-22-3333 - house_number : 221B - """ - if self.__name: - return self.__name - elif self.__parent: - par = self.__parent() - if par: - return par.__lookup(self) - else: - return None - elif (len(self) == 1 and - len(self.__tokdict) == 1 and - next(iter(self.__tokdict.values()))[0][1] in (0,-1)): - return next(iter(self.__tokdict.keys())) - else: - return None - - def dump(self, indent='', depth=0, full=True): - """ - Diagnostic method for listing out the contents of a C{ParseResults}. - Accepts an optional C{indent} argument so that this string can be embedded - in a nested display of other data. - - Example:: - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString('12/31/1999') - print(result.dump()) - prints:: - ['12', '/', '31', '/', '1999'] - - day: 1999 - - month: 31 - - year: 12 - """ - out = [] - NL = '\n' - out.append( indent+_ustr(self.asList()) ) - if full: - if self.haskeys(): - items = sorted((str(k), v) for k,v in self.items()) - for k,v in items: - if out: - out.append(NL) - out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) - if isinstance(v,ParseResults): - if v: - out.append( v.dump(indent,depth+1) ) - else: - out.append(_ustr(v)) - else: - out.append(repr(v)) - elif any(isinstance(vv,ParseResults) for vv in self): - v = self - for i,vv in enumerate(v): - if isinstance(vv,ParseResults): - out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) )) - else: - out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv))) - - return "".join(out) - - def pprint(self, *args, **kwargs): - """ - Pretty-printer for parsed results as a list, using the C{pprint} module. - Accepts additional positional or keyword args as defined for the - C{pprint.pprint} method. 
(U{http://docs.python.org/3/library/pprint.html#pprint.pprint}) - - Example:: - ident = Word(alphas, alphanums) - num = Word(nums) - func = Forward() - term = ident | num | Group('(' + func + ')') - func <<= ident + Group(Optional(delimitedList(term))) - result = func.parseString("fna a,b,(fnb c,d,200),100") - result.pprint(width=40) - prints:: - ['fna', - ['a', - 'b', - ['(', 'fnb', ['c', 'd', '200'], ')'], - '100']] - """ - pprint.pprint(self.asList(), *args, **kwargs) - - # add support for pickle protocol - def __getstate__(self): - return ( self.__toklist, - ( self.__tokdict.copy(), - self.__parent is not None and self.__parent() or None, - self.__accumNames, - self.__name ) ) - - def __setstate__(self,state): - self.__toklist = state[0] - (self.__tokdict, - par, - inAccumNames, - self.__name) = state[1] - self.__accumNames = {} - self.__accumNames.update(inAccumNames) - if par is not None: - self.__parent = wkref(par) - else: - self.__parent = None - - def __getnewargs__(self): - return self.__toklist, self.__name, self.__asList, self.__modal - - def __dir__(self): - return (dir(type(self)) + list(self.keys())) - -collections.MutableMapping.register(ParseResults) - -def col (loc,strg): - """Returns current column within a string, counting newlines as line separators. - The first column is number 1. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information - on parsing strings containing C{<TAB>}s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - """ - s = strg - return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc) - -def lineno(loc,strg): - """Returns current line number within a string, counting newlines as line separators. - The first line is number 1. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information - on parsing strings containing C{<TAB>}s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - """ - return strg.count("\n",0,loc) + 1 - -def line( loc, strg ): - """Returns the line of text containing loc within a string, counting newlines as line separators. 
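-    For example (an illustrative sketch)::
-
-        line(7, "abc\ndefgh\nij")   # -> 'defgh'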
- """ - lastCR = strg.rfind("\n", 0, loc) - nextCR = strg.find("\n", loc) - if nextCR >= 0: - return strg[lastCR+1:nextCR] - else: - return strg[lastCR+1:] - -def _defaultStartDebugAction( instring, loc, expr ): - print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))) - -def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ): - print ("Matched " + _ustr(expr) + " -> " + str(toks.asList())) - -def _defaultExceptionDebugAction( instring, loc, expr, exc ): - print ("Exception raised:" + _ustr(exc)) - -def nullDebugAction(*args): - """'Do-nothing' debug action, to suppress debugging output during parsing.""" - pass - -# Only works on Python 3.x - nonlocal is toxic to Python 2 installs -#~ 'decorator to trim function calls to match the arity of the target' -#~ def _trim_arity(func, maxargs=3): - #~ if func in singleArgBuiltins: - #~ return lambda s,l,t: func(t) - #~ limit = 0 - #~ foundArity = False - #~ def wrapper(*args): - #~ nonlocal limit,foundArity - #~ while 1: - #~ try: - #~ ret = func(*args[limit:]) - #~ foundArity = True - #~ return ret - #~ except TypeError: - #~ if limit == maxargs or foundArity: - #~ raise - #~ limit += 1 - #~ continue - #~ return wrapper - -# this version is Python 2.x-3.x cross-compatible -'decorator to trim function calls to match the arity of the target' -def _trim_arity(func, maxargs=2): - if func in singleArgBuiltins: - return lambda s,l,t: func(t) - limit = [0] - foundArity = [False] - - # traceback return data structure changed in Py3.5 - normalize back to plain tuples - if system_version[:2] >= (3,5): - def extract_stack(limit=0): - # special handling for Python 3.5.0 - extra deep call stack by 1 - offset = -3 if system_version == (3,5,0) else -2 - frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset] - return [(frame_summary.filename, frame_summary.lineno)] - def extract_tb(tb, limit=0): - frames = traceback.extract_tb(tb, limit=limit) - frame_summary = frames[-1] - return [(frame_summary.filename, frame_summary.lineno)] - else: - extract_stack = traceback.extract_stack - extract_tb = traceback.extract_tb - - # synthesize what would be returned by traceback.extract_stack at the call to - # user's parse action 'func', so that we don't incur call penalty at parse time - - LINE_DIFF = 6 - # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND - # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
- this_line = extract_stack(limit=2)[-1] - pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF) - - def wrapper(*args): - while 1: - try: - ret = func(*args[limit[0]:]) - foundArity[0] = True - return ret - except TypeError: - # re-raise TypeErrors if they did not come from our arity testing - if foundArity[0]: - raise - else: - try: - tb = sys.exc_info()[-1] - if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth: - raise - finally: - del tb - - if limit[0] <= maxargs: - limit[0] += 1 - continue - raise - - # copy func name to wrapper for sensible debug output - func_name = "<parse action>" - try: - func_name = getattr(func, '__name__', - getattr(func, '__class__').__name__) - except Exception: - func_name = str(func) - wrapper.__name__ = func_name - - return wrapper - -class ParserElement(object): - """Abstract base level parser element class.""" - DEFAULT_WHITE_CHARS = " \n\t\r" - verbose_stacktrace = False - - @staticmethod - def setDefaultWhitespaceChars( chars ): - r""" - Overrides the default whitespace chars - - Example:: - # default whitespace chars are space, <TAB> and newline - OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] - - # change to just treat newline as significant - ParserElement.setDefaultWhitespaceChars(" \t") - OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] - """ - ParserElement.DEFAULT_WHITE_CHARS = chars - - @staticmethod - def inlineLiteralsUsing(cls): - """ - Set class to be used for inclusion of string literals into a parser. - - Example:: - # default literal class used is Literal - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - - # change to Suppress - ParserElement.inlineLiteralsUsing(Suppress) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] - """ - ParserElement._literalStringClass = cls - - def __init__( self, savelist=False ): - self.parseAction = list() - self.failAction = None - #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall - self.strRepr = None - self.resultsName = None - self.saveAsList = savelist - self.skipWhitespace = True - self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS - self.copyDefaultWhiteChars = True - self.mayReturnEmpty = False # used when checking for left-recursion - self.keepTabs = False - self.ignoreExprs = list() - self.debug = False - self.streamlined = False - self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index - self.errmsg = "" - self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) - self.debugActions = ( None, None, None ) #custom debug actions - self.re = None - self.callPreparse = True # used to avoid redundant calls to preParse - self.callDuringTry = False - - def copy( self ): - """ - Make a copy of this C{ParserElement}. Useful for defining different parse actions - for the same parsing pattern, using copies of the original parse element. 
- - Example:: - integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") - integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") - - print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) - prints:: - [5120, 100, 655360, 268435456] - Equivalent form of C{expr.copy()} is just C{expr()}:: - integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") - """ - cpy = copy.copy( self ) - cpy.parseAction = self.parseAction[:] - cpy.ignoreExprs = self.ignoreExprs[:] - if self.copyDefaultWhiteChars: - cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS - return cpy - - def setName( self, name ): - """ - Define name for this expression, makes debugging and exception messages clearer. - - Example:: - Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) - Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) - """ - self.name = name - self.errmsg = "Expected " + self.name - if hasattr(self,"exception"): - self.exception.msg = self.errmsg - return self - - def setResultsName( self, name, listAllMatches=False ): - """ - Define name for referencing matching tokens as a nested attribute - of the returned parse results. - NOTE: this returns a *copy* of the original C{ParserElement} object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. - - You can also set results names using the abbreviated syntax, - C{expr("name")} in place of C{expr.setResultsName("name")} - - see L{I{__call__}<__call__>}. - - Example:: - date_str = (integer.setResultsName("year") + '/' - + integer.setResultsName("month") + '/' - + integer.setResultsName("day")) - - # equivalent form: - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - """ - newself = self.copy() - if name.endswith("*"): - name = name[:-1] - listAllMatches=True - newself.resultsName = name - newself.modalResults = not listAllMatches - return newself - - def setBreak(self,breakFlag = True): - """Method to invoke the Python pdb debugger when this element is - about to be parsed. Set C{breakFlag} to True to enable, False to - disable. - """ - if breakFlag: - _parseMethod = self._parse - def breaker(instring, loc, doActions=True, callPreParse=True): - import pdb - pdb.set_trace() - return _parseMethod( instring, loc, doActions, callPreParse ) - breaker._originalParseMethod = _parseMethod - self._parse = breaker - else: - if hasattr(self._parse,"_originalParseMethod"): - self._parse = self._parse._originalParseMethod - return self - - def setParseAction( self, *fns, **kwargs ): - """ - Define action to perform when successfully matching parse element definition. - Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, - C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object - If the functions in fns modify the tokens, they can return them as the return - value from fn, and the modified list of tokens will replace the original. - Otherwise, fn does not need to return any value. 
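-
-        For instance, each of these is accepted (the call is retried internally
-        with leading arguments dropped until the arity fits)::
-
-            integer = Word(nums)
-            integer.setParseAction(lambda toks: int(toks[0]))           # fn(toks)
-            integer.setParseAction(lambda loc, toks: int(toks[0]))      # fn(loc,toks)
-            integer.setParseAction(lambda s, loc, toks: int(toks[0]))   # fn(s,loc,toks)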
- - Optional keyword arguments: - - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{parseString}<parseString>} for more information - on parsing strings containing C{<TAB>}s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - - Example:: - integer = Word(nums) - date_str = integer + '/' + integer + '/' + integer - - date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - # use parse action to convert to ints at parse time - integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - date_str = integer + '/' + integer + '/' + integer - - # note that integer fields are now ints, not strings - date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] - """ - self.parseAction = list(map(_trim_arity, list(fns))) - self.callDuringTry = kwargs.get("callDuringTry", False) - return self - - def addParseAction( self, *fns, **kwargs ): - """ - Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}. - - See examples in L{I{copy}<copy>}. - """ - self.parseAction += list(map(_trim_arity, list(fns))) - self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) - return self - - def addCondition(self, *fns, **kwargs): - """Add a boolean predicate function to expression's list of parse actions. See - L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction}, - functions passed to C{addCondition} need to return boolean success/fail of the condition. - - Optional keyword arguments: - - message = define a custom message to be used in the raised exception - - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException - - Example:: - integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - year_int = integer.copy() - year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") - date_str = year_int + '/' + integer + '/' + integer - - result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) - """ - msg = kwargs.get("message", "failed user-defined condition") - exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException - for fn in fns: - def pa(s,l,t): - if not bool(_trim_arity(fn)(s,l,t)): - raise exc_type(s,l,msg) - self.parseAction.append(pa) - self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) - return self - - def setFailAction( self, fn ): - """Define action to perform if parsing fails at this expression. - Fail acton fn is a callable function that takes the arguments - C{fn(s,loc,expr,err)} where: - - s = string being parsed - - loc = location where expression match was attempted and failed - - expr = the parse expression that failed - - err = the exception thrown - The function returns no value. 
It may throw C{L{ParseFatalException}} - if it is desired to stop parsing immediately.""" - self.failAction = fn - return self - - def _skipIgnorables( self, instring, loc ): - exprsFound = True - while exprsFound: - exprsFound = False - for e in self.ignoreExprs: - try: - while 1: - loc,dummy = e._parse( instring, loc ) - exprsFound = True - except ParseException: - pass - return loc - - def preParse( self, instring, loc ): - if self.ignoreExprs: - loc = self._skipIgnorables( instring, loc ) - - if self.skipWhitespace: - wt = self.whiteChars - instrlen = len(instring) - while loc < instrlen and instring[loc] in wt: - loc += 1 - - return loc - - def parseImpl( self, instring, loc, doActions=True ): - return loc, [] - - def postParse( self, instring, loc, tokenlist ): - return tokenlist - - #~ @profile - def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): - debugging = ( self.debug ) #and doActions ) - - if debugging or self.failAction: - #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) - if (self.debugActions[0] ): - self.debugActions[0]( instring, loc, self ) - if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) - else: - preloc = loc - tokensStart = preloc - try: - try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) - except ParseBaseException as err: - #~ print ("Exception raised:", err) - if self.debugActions[2]: - self.debugActions[2]( instring, tokensStart, self, err ) - if self.failAction: - self.failAction( instring, tokensStart, self, err ) - raise - else: - if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) - else: - preloc = loc - tokensStart = preloc - if self.mayIndexError or loc >= len(instring): - try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) - else: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - - tokens = self.postParse( instring, loc, tokens ) - - retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults ) - if self.parseAction and (doActions or self.callDuringTry): - if debugging: - try: - for fn in self.parseAction: - tokens = fn( instring, tokensStart, retTokens ) - if tokens is not None: - retTokens = ParseResults( tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) - except ParseBaseException as err: - #~ print "Exception raised in user parse action:", err - if (self.debugActions[2] ): - self.debugActions[2]( instring, tokensStart, self, err ) - raise - else: - for fn in self.parseAction: - tokens = fn( instring, tokensStart, retTokens ) - if tokens is not None: - retTokens = ParseResults( tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) - - if debugging: - #~ print ("Matched",self,"->",retTokens.asList()) - if (self.debugActions[1] ): - self.debugActions[1]( instring, tokensStart, loc, self, retTokens ) - - return loc, retTokens - - def tryParse( self, instring, loc ): - try: - return self._parse( instring, loc, doActions=False )[0] - except ParseFatalException: - raise ParseException( instring, loc, self.errmsg, self) - - def canParseNext(self, instring, loc): - try: - self.tryParse(instring, loc) - except (ParseException, 
IndexError): - return False - else: - return True - - class _UnboundedCache(object): - def __init__(self): - cache = {} - self.not_in_cache = not_in_cache = object() - - def get(self, key): - return cache.get(key, not_in_cache) - - def set(self, key, value): - cache[key] = value - - def clear(self): - cache.clear() - - self.get = types.MethodType(get, self) - self.set = types.MethodType(set, self) - self.clear = types.MethodType(clear, self) - - if _OrderedDict is not None: - class _FifoCache(object): - def __init__(self, size): - self.not_in_cache = not_in_cache = object() - - cache = _OrderedDict() - - def get(self, key): - return cache.get(key, not_in_cache) - - def set(self, key, value): - cache[key] = value - if len(cache) > size: - cache.popitem(False) - - def clear(self): - cache.clear() - - self.get = types.MethodType(get, self) - self.set = types.MethodType(set, self) - self.clear = types.MethodType(clear, self) - - else: - class _FifoCache(object): - def __init__(self, size): - self.not_in_cache = not_in_cache = object() - - cache = {} - key_fifo = collections.deque([], size) - - def get(self, key): - return cache.get(key, not_in_cache) - - def set(self, key, value): - cache[key] = value - if len(cache) > size: - cache.pop(key_fifo.popleft(), None) - key_fifo.append(key) - - def clear(self): - cache.clear() - key_fifo.clear() - - self.get = types.MethodType(get, self) - self.set = types.MethodType(set, self) - self.clear = types.MethodType(clear, self) - - # argument cache for optimizing repeated calls when backtracking through recursive expressions - packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail - packrat_cache_lock = RLock() - packrat_cache_stats = [0, 0] - - # this method gets repeatedly called during backtracking with the same arguments - - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): - HIT, MISS = 0, 1 - lookup = (self, instring, loc, callPreParse, doActions) - with ParserElement.packrat_cache_lock: - cache = ParserElement.packrat_cache - value = cache.get(lookup) - if value is cache.not_in_cache: - ParserElement.packrat_cache_stats[MISS] += 1 - try: - value = self._parseNoCache(instring, loc, doActions, callPreParse) - except ParseBaseException as pe: - # cache a copy of the exception, without the traceback - cache.set(lookup, pe.__class__(*pe.args)) - raise - else: - cache.set(lookup, (value[0], value[1].copy())) - return value - else: - ParserElement.packrat_cache_stats[HIT] += 1 - if isinstance(value, Exception): - raise value - return (value[0], value[1].copy()) - - _parse = _parseNoCache - - @staticmethod - def resetCache(): - ParserElement.packrat_cache.clear() - ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats) - - _packratEnabled = False - @staticmethod - def enablePackrat(cache_size_limit=128): - """Enables "packrat" parsing, which adds memoizing to the parsing logic. - Repeated parse attempts at the same string location (which happens - often in many complex grammars) can immediately return a cached value, - instead of re-executing parsing/validating code. Memoizing is done of - both valid results and parsing exceptions. 
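-
-        A small usage sketch (C{packrat_cache_stats} is the [hits, misses] counter
-        pair defined on C{ParserElement} above; the values printed will vary)::
-
-            import pyparsing as pp
-            pp.ParserElement.enablePackrat(cache_size_limit=256)
-            integer = pp.Word(pp.nums)
-            (integer + ',' + integer).parseString("1,2")
-            print(pp.ParserElement.packrat_cache_stats)   # [hits, misses]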
- - Parameters: - - cache_size_limit - (default=C{128}) - if an integer value is provided - will limit the size of the packrat cache; if None is passed, then - the cache size will be unbounded; if 0 is passed, the cache will - be effectively disabled. - - This speedup may break existing programs that use parse actions that - have side-effects. For this reason, packrat parsing is disabled when - you first import pyparsing. To activate the packrat feature, your - program must call the class method C{ParserElement.enablePackrat()}. If - your program uses C{psyco} to "compile as you go", you must call - C{enablePackrat} before calling C{psyco.full()}. If you do not do this, - Python will crash. For best results, call C{enablePackrat()} immediately - after importing pyparsing. - - Example:: - import pyparsing - pyparsing.ParserElement.enablePackrat() - """ - if not ParserElement._packratEnabled: - ParserElement._packratEnabled = True - if cache_size_limit is None: - ParserElement.packrat_cache = ParserElement._UnboundedCache() - else: - ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit) - ParserElement._parse = ParserElement._parseCache - - def parseString( self, instring, parseAll=False ): - """ - Execute the parse expression with the given string. - This is the main interface to the client code, once the complete - expression has been built. - - If you want the grammar to require that the entire input string be - successfully parsed, then set C{parseAll} to True (equivalent to ending - the grammar with C{L{StringEnd()}}). - - Note: C{parseString} implicitly calls C{expandtabs()} on the input string, - in order to report proper column numbers in parse actions. - If the input string contains tabs and - the grammar uses parse actions that use the C{loc} argument to index into the - string being parsed, you can ensure you have a consistent view of the input - string by: - - calling C{parseWithTabs} on your grammar before calling C{parseString} - (see L{I{parseWithTabs}<parseWithTabs>}) - - define your parse action using the full C{(s,loc,toks)} signature, and - reference the input string using the parse action's C{s} argument - - explictly expand the tabs in your input string before calling - C{parseString} - - Example:: - Word('a').parseString('aaaaabaaa') # -> ['aaaaa'] - Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text - """ - ParserElement.resetCache() - if not self.streamlined: - self.streamline() - #~ self.saveAsList = True - for e in self.ignoreExprs: - e.streamline() - if not self.keepTabs: - instring = instring.expandtabs() - try: - loc, tokens = self._parse( instring, 0 ) - if parseAll: - loc = self.preParse( instring, loc ) - se = Empty() + StringEnd() - se._parse( instring, loc ) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc - else: - return tokens - - def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): - """ - Scan the input string for expression matches. Each match will return the - matching tokens, start location, and end location. May be called with optional - C{maxMatches} argument, to clip scanning after 'n' matches are found. If - C{overlap} is specified, then overlapping matches will be reported. - - Note that the start and end locations are reported relative to the string - being parsed. 
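-
-        A hedged sketch of the C{overlap} behavior (the trace is worked out from
-        the scanning loop below, not taken from the original docstring)::
-
-            integer = Word(nums)
-            pair = integer + ',' + integer
-            for tokens, start, end in pair.scanString("1,2,3", overlap=True):
-                print(tokens, start, end)
-            # -> ['1', ',', '2'] 0 3
-            # -> ['2', ',', '3'] 2 5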
See L{I{parseString}<parseString>} for more information on parsing - strings with embedded tabs. - - Example:: - source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" - print(source) - for tokens,start,end in Word(alphas).scanString(source): - print(' '*start + '^'*(end-start)) - print(' '*start + tokens[0]) - - prints:: - - sldjf123lsdjjkf345sldkjf879lkjsfd987 - ^^^^^ - sldjf - ^^^^^^^ - lsdjjkf - ^^^^^^ - sldkjf - ^^^^^^ - lkjsfd - """ - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - - if not self.keepTabs: - instring = _ustr(instring).expandtabs() - instrlen = len(instring) - loc = 0 - preparseFn = self.preParse - parseFn = self._parse - ParserElement.resetCache() - matches = 0 - try: - while loc <= instrlen and matches < maxMatches: - try: - preloc = preparseFn( instring, loc ) - nextLoc,tokens = parseFn( instring, preloc, callPreParse=False ) - except ParseException: - loc = preloc+1 - else: - if nextLoc > loc: - matches += 1 - yield tokens, preloc, nextLoc - if overlap: - nextloc = preparseFn( instring, loc ) - if nextloc > loc: - loc = nextLoc - else: - loc += 1 - else: - loc = nextLoc - else: - loc = preloc+1 - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc - - def transformString( self, instring ): - """ - Extension to C{L{scanString}}, to modify matching text with modified tokens that may - be returned from a parse action. To use C{transformString}, define a grammar and - attach a parse action to it that modifies the returned token list. - Invoking C{transformString()} on a target string will then scan for matches, - and replace the matched text patterns according to the logic in the parse - action. C{transformString()} returns the resulting transformed string. - - Example:: - wd = Word(alphas) - wd.setParseAction(lambda toks: toks[0].title()) - - print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york.")) - Prints:: - Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. - """ - out = [] - lastE = 0 - # force preservation of <TAB>s, to minimize unwanted transformation of string, and to - # keep string locs straight between transformString and scanString - self.keepTabs = True - try: - for t,s,e in self.scanString( instring ): - out.append( instring[lastE:s] ) - if t: - if isinstance(t,ParseResults): - out += t.asList() - elif isinstance(t,list): - out += t - else: - out.append(t) - lastE = e - out.append(instring[lastE:]) - out = [o for o in out if o] - return "".join(map(_ustr,_flatten(out))) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc - - def searchString( self, instring, maxMatches=_MAX_INT ): - """ - Another extension to C{L{scanString}}, simplifying the access to the tokens found - to match the given parse expression. May be called with optional - C{maxMatches} argument, to clip searching after 'n' matches are found. 
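-
-        A complementary sketch of C{maxMatches} (illustrative)::
-
-            cap_word = Word(alphas.upper(), alphas.lower())
-            # stop scanning after the first two matches: 'More' and 'Iron'
-            print(cap_word.searchString("More than Iron, more than Lead", maxMatches=2))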
- - Example:: - # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters - cap_word = Word(alphas.upper(), alphas.lower()) - - print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) - prints:: - ['More', 'Iron', 'Lead', 'Gold', 'I'] - """ - try: - return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc - - def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): - """ - Generator method to split a string using the given expression as a separator. - May be called with optional C{maxsplit} argument, to limit the number of splits; - and the optional C{includeSeparators} argument (default=C{False}), if the separating - matching text should be included in the split results. - - Example:: - punc = oneOf(list(".,;:/-!?")) - print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) - prints:: - ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] - """ - splits = 0 - last = 0 - for t,s,e in self.scanString(instring, maxMatches=maxsplit): - yield instring[last:s] - if includeSeparators: - yield t[0] - last = e - yield instring[last:] - - def __add__(self, other ): - """ - Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement - converts them to L{Literal}s by default. - - Example:: - greet = Word(alphas) + "," + Word(alphas) + "!" - hello = "Hello, World!" - print (hello, "->", greet.parseString(hello)) - Prints:: - Hello, World! -> ['Hello', ',', 'World', '!'] - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return And( [ self, other ] ) - - def __radd__(self, other ): - """ - Implementation of + operator when left operand is not a C{L{ParserElement}} - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other + self - - def __sub__(self, other): - """ - Implementation of - operator, returns C{L{And}} with error stop - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return And( [ self, And._ErrorStop(), other ] ) - - def __rsub__(self, other ): - """ - Implementation of - operator when left operand is not a C{L{ParserElement}} - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other - self - - def __mul__(self,other): - """ - Implementation of * operator, allows use of C{expr * 3} in place of - C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer - tuple, similar to C{{min,max}} multipliers in regular expressions. 
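The C{'-'} (error-stop) operator documented above has no usage example in its docstring; a brief sketch of its effect, using hypothetical names::

    from pyparsing import Literal, Word, alphas

    # once '(' matches, '-' forbids backtracking past it, so a bad body fails
    # immediately with ParseSyntaxException instead of a quiet ParseException
    group = Literal("(") - Word(alphas) + ")"
    print(group.parseString("(abc)"))   # -> ['(', 'abc', ')']
    group.parseString("(123)")          # raises ParseSyntaxException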
Tuples - may also include C{None} as in: - - C{expr*(n,None)} or C{expr*(n,)} is equivalent - to C{expr*n + L{ZeroOrMore}(expr)} - (read as "at least n instances of C{expr}") - - C{expr*(None,n)} is equivalent to C{expr*(0,n)} - (read as "0 to n instances of C{expr}") - - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)} - - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)} - - Note that C{expr*(None,n)} does not raise an exception if - more than n exprs exist in the input stream; that is, - C{expr*(None,n)} does not enforce a maximum number of expr - occurrences. If this behavior is desired, then write - C{expr*(None,n) + ~expr} - """ - if isinstance(other,int): - minElements, optElements = other,0 - elif isinstance(other,tuple): - other = (other + (None, None))[:2] - if other[0] is None: - other = (0, other[1]) - if isinstance(other[0],int) and other[1] is None: - if other[0] == 0: - return ZeroOrMore(self) - if other[0] == 1: - return OneOrMore(self) - else: - return self*other[0] + ZeroOrMore(self) - elif isinstance(other[0],int) and isinstance(other[1],int): - minElements, optElements = other - optElements -= minElements - else: - raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1])) - else: - raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other)) - - if minElements < 0: - raise ValueError("cannot multiply ParserElement by negative value") - if optElements < 0: - raise ValueError("second tuple value must be greater or equal to first tuple value") - if minElements == optElements == 0: - raise ValueError("cannot multiply ParserElement by 0 or (0,0)") - - if (optElements): - def makeOptionalList(n): - if n>1: - return Optional(self + makeOptionalList(n-1)) - else: - return Optional(self) - if minElements: - if minElements == 1: - ret = self + makeOptionalList(optElements) - else: - ret = And([self]*minElements) + makeOptionalList(optElements) - else: - ret = makeOptionalList(optElements) - else: - if minElements == 1: - ret = self - else: - ret = And([self]*minElements) - return ret - - def __rmul__(self, other): - return self.__mul__(other) - - def __or__(self, other ): - """ - Implementation of | operator - returns C{L{MatchFirst}} - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return MatchFirst( [ self, other ] ) - - def __ror__(self, other ): - """ - Implementation of | operator when left operand is not a C{L{ParserElement}} - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other | self - - def __xor__(self, other ): - """ - Implementation of ^ operator - returns C{L{Or}} - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Or( [ self, other ] ) - - def __rxor__(self, other ): - """ - Implementation of ^ operator when left operand is not a C{L{ParserElement}} - """ - if isinstance( other, basestring ): - other = 
ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other ^ self - - def __and__(self, other ): - """ - Implementation of & operator - returns C{L{Each}} - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Each( [ self, other ] ) - - def __rand__(self, other ): - """ - Implementation of & operator when left operand is not a C{L{ParserElement}} - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other & self - - def __invert__( self ): - """ - Implementation of ~ operator - returns C{L{NotAny}} - """ - return NotAny( self ) - - def __call__(self, name=None): - """ - Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}. - - If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be - passed as C{True}. - - If C{name} is omitted, same as calling C{L{copy}}. - - Example:: - # these are equivalent - userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") - userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") - """ - if name is not None: - return self.setResultsName(name) - else: - return self.copy() - - def suppress( self ): - """ - Suppresses the output of this C{ParserElement}; useful to keep punctuation from - cluttering up returned output. - """ - return Suppress( self ) - - def leaveWhitespace( self ): - """ - Disables the skipping of whitespace before matching the characters in the - C{ParserElement}'s defined pattern. This is normally only used internally by - the pyparsing module, but may be needed in some whitespace-sensitive grammars. - """ - self.skipWhitespace = False - return self - - def setWhitespaceChars( self, chars ): - """ - Overrides the default whitespace chars - """ - self.skipWhitespace = True - self.whiteChars = chars - self.copyDefaultWhiteChars = False - return self - - def parseWithTabs( self ): - """ - Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string. - Must be called before C{parseString} when the input grammar contains elements that - match C{<TAB>} characters. - """ - self.keepTabs = True - return self - - def ignore( self, other ): - """ - Define expression to be ignored (e.g., comments) while doing pattern - matching; may be called repeatedly, to define multiple comment or other - ignorable patterns. - - Example:: - patt = OneOrMore(Word(alphas)) - patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] - - patt.ignore(cStyleComment) - patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] - """ - if isinstance(other, basestring): - other = Suppress(other) - - if isinstance( other, Suppress ): - if other not in self.ignoreExprs: - self.ignoreExprs.append(other) - else: - self.ignoreExprs.append( Suppress( other.copy() ) ) - return self - - def setDebugActions( self, startAction, successAction, exceptionAction ): - """ - Enable display of debugging messages while doing pattern matching. 
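A short sketch combining C{__call__} (results names), C{suppress}, and C{ignore} from the methods above; the grammar is illustrative::

    from pyparsing import Word, alphas, nums, Suppress, cStyleComment

    assignment = Word(alphas)("lhs") + Suppress("=") + Word(nums)("rhs")
    assignment.ignore(cStyleComment)        # skip /* ... */ comments anywhere
    result = assignment.parseString("x = /* the answer */ 42")
    print(result["lhs"], result["rhs"])     # -> x 42
    print(result.asList())                  # '=' suppressed -> ['x', '42']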
- """ - self.debugActions = (startAction or _defaultStartDebugAction, - successAction or _defaultSuccessDebugAction, - exceptionAction or _defaultExceptionDebugAction) - self.debug = True - return self - - def setDebug( self, flag=True ): - """ - Enable display of debugging messages while doing pattern matching. - Set C{flag} to True to enable, False to disable. - - Example:: - wd = Word(alphas).setName("alphaword") - integer = Word(nums).setName("numword") - term = wd | integer - - # turn on debugging for wd - wd.setDebug() - - OneOrMore(term).parseString("abc 123 xyz 890") - - prints:: - Match alphaword at loc 0(1,1) - Matched alphaword -> ['abc'] - Match alphaword at loc 3(1,4) - Exception raised:Expected alphaword (at char 4), (line:1, col:5) - Match alphaword at loc 7(1,8) - Matched alphaword -> ['xyz'] - Match alphaword at loc 11(1,12) - Exception raised:Expected alphaword (at char 12), (line:1, col:13) - Match alphaword at loc 15(1,16) - Exception raised:Expected alphaword (at char 15), (line:1, col:16) - - The output shown is that produced by the default debug actions - custom debug actions can be - specified using L{setDebugActions}. Prior to attempting - to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"} - is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"} - message is shown. Also note the use of L{setName} to assign a human-readable name to the expression, - which makes debugging and exception messages easier to understand - for instance, the default - name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}. - """ - if flag: - self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) - else: - self.debug = False - return self - - def __str__( self ): - return self.name - - def __repr__( self ): - return _ustr(self) - - def streamline( self ): - self.streamlined = True - self.strRepr = None - return self - - def checkRecursion( self, parseElementList ): - pass - - def validate( self, validateTrace=[] ): - """ - Check defined expressions for valid structure, check for infinite recursive definitions. - """ - self.checkRecursion( [] ) - - def parseFile( self, file_or_filename, parseAll=False ): - """ - Execute the parse expression on the given file or filename. - If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing. - """ - try: - file_contents = file_or_filename.read() - except AttributeError: - with open(file_or_filename, "r") as f: - file_contents = f.read() - try: - return self.parseString(file_contents, parseAll) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc - - def __eq__(self,other): - if isinstance(other, ParserElement): - return self is other or vars(self) == vars(other) - elif isinstance(other, basestring): - return self.matches(other) - else: - return super(ParserElement,self)==other - - def __ne__(self,other): - return not (self == other) - - def __hash__(self): - return hash(id(self)) - - def __req__(self,other): - return self == other - - def __rne__(self,other): - return not (self == other) - - def matches(self, testString, parseAll=True): - """ - Method for quick testing of a parser against a test string. Good for simple - inline microtests of sub expressions while building up larger parser. 
- - Parameters: - - testString - to test against this expression for a match - - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests - - Example:: - expr = Word(nums) - assert expr.matches("100") - """ - try: - self.parseString(_ustr(testString), parseAll=parseAll) - return True - except ParseBaseException: - return False - - def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False): - """ - Execute the parse expression on a series of test strings, showing each - test, the parsed results or where the parse failed. Quick and easy way to - run a parse expression against a list of sample strings. - - Parameters: - - tests - a list of separate test strings, or a multiline string of test strings - - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests - - comment - (default=C{'#'}) - expression for indicating embedded comments in the test - string; pass None to disable comment filtering - - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline; - if False, only dump nested list - - printResults - (default=C{True}) prints test output to stdout - - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing - - Returns: a (success, results) tuple, where success indicates that all tests succeeded - (or failed if C{failureTests} is True), and the results contain a list of lines of each - test's output - - Example:: - number_expr = pyparsing_common.number.copy() - - result = number_expr.runTests(''' - # unsigned integer - 100 - # negative integer - -100 - # float with scientific notation - 6.02e23 - # integer with scientific notation - 1e-12 - ''') - print("Success" if result[0] else "Failed!") - - result = number_expr.runTests(''' - # stray character - 100Z - # missing leading digit before '.' - -.100 - # too many '.' - 3.14.159 - ''', failureTests=True) - print("Success" if result[0] else "Failed!") - prints:: - # unsigned integer - 100 - [100] - - # negative integer - -100 - [-100] - - # float with scientific notation - 6.02e23 - [6.02e+23] - - # integer with scientific notation - 1e-12 - [1e-12] - - Success - - # stray character - 100Z - ^ - FAIL: Expected end of text (at char 3), (line:1, col:4) - - # missing leading digit before '.' - -.100 - ^ - FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) - - # too many '.' - 3.14.159 - ^ - FAIL: Expected end of text (at char 4), (line:1, col:5) - - Success - - Each test string must be on a single line. If you want to test a string that spans multiple - lines, create a test like this:: - - expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines") - - (Note that this is a raw string literal, you must include the leading 'r'.) 
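A compact sketch of the C{(success, results)} return value of C{runTests}, complementing the longer example above::

    from pyparsing import Word, nums

    success, results = Word(nums).runTests('''
        # all-digit strings should parse
        123
        4567
        ''')
    print(success)   # -> True when every non-comment test line parses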
- """ - if isinstance(tests, basestring): - tests = list(map(str.strip, tests.rstrip().splitlines())) - if isinstance(comment, basestring): - comment = Literal(comment) - allResults = [] - comments = [] - success = True - for t in tests: - if comment is not None and comment.matches(t, False) or comments and not t: - comments.append(t) - continue - if not t: - continue - out = ['\n'.join(comments), t] - comments = [] - try: - t = t.replace(r'\n','\n') - result = self.parseString(t, parseAll=parseAll) - out.append(result.dump(full=fullDump)) - success = success and not failureTests - except ParseBaseException as pe: - fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" - if '\n' in t: - out.append(line(pe.loc, t)) - out.append(' '*(col(pe.loc,t)-1) + '^' + fatal) - else: - out.append(' '*pe.loc + '^' + fatal) - out.append("FAIL: " + str(pe)) - success = success and failureTests - result = pe - except Exception as exc: - out.append("FAIL-EXCEPTION: " + str(exc)) - success = success and failureTests - result = exc - - if printResults: - if fullDump: - out.append('') - print('\n'.join(out)) - - allResults.append((t, result)) - - return success, allResults - - -class Token(ParserElement): - """ - Abstract C{ParserElement} subclass, for defining atomic matching patterns. - """ - def __init__( self ): - super(Token,self).__init__( savelist=False ) - - -class Empty(Token): - """ - An empty token, will always match. - """ - def __init__( self ): - super(Empty,self).__init__() - self.name = "Empty" - self.mayReturnEmpty = True - self.mayIndexError = False - - -class NoMatch(Token): - """ - A token that will never match. - """ - def __init__( self ): - super(NoMatch,self).__init__() - self.name = "NoMatch" - self.mayReturnEmpty = True - self.mayIndexError = False - self.errmsg = "Unmatchable token" - - def parseImpl( self, instring, loc, doActions=True ): - raise ParseException(instring, loc, self.errmsg, self) - - -class Literal(Token): - """ - Token to exactly match a specified string. - - Example:: - Literal('blah').parseString('blah') # -> ['blah'] - Literal('blah').parseString('blahfooblah') # -> ['blah'] - Literal('blah').parseString('bla') # -> Exception: Expected "blah" - - For case-insensitive matching, use L{CaselessLiteral}. - - For keyword matching (force word break before and after the matched string), - use L{Keyword} or L{CaselessKeyword}. 
- """ - def __init__( self, matchString ): - super(Literal,self).__init__() - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Literal; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.__class__ = Empty - self.name = '"%s"' % _ustr(self.match) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - - # Performance tuning: this routine gets called a *lot* - # if this is a single character match string and the first character matches, - # short-circuit as quickly as possible, and avoid calling startswith - #~ @profile - def parseImpl( self, instring, loc, doActions=True ): - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) ): - return loc+self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) -_L = Literal -ParserElement._literalStringClass = Literal - -class Keyword(Token): - """ - Token to exactly match a specified string as a keyword, that is, it must be - immediately followed by a non-keyword character. Compare with C{L{Literal}}: - - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}. - - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'} - Accepts two optional constructor arguments in addition to the keyword string: - - C{identChars} is a string of characters that would be valid identifier characters, - defaulting to all alphanumerics + "_" and "$" - - C{caseless} allows case-insensitive matching, default is C{False}. - - Example:: - Keyword("start").parseString("start") # -> ['start'] - Keyword("start").parseString("starting") # -> Exception - - For case-insensitive matching, use L{CaselessKeyword}. 
- """ - DEFAULT_KEYWORD_CHARS = alphanums+"_$" - - def __init__( self, matchString, identChars=None, caseless=False ): - super(Keyword,self).__init__() - if identChars is None: - identChars = Keyword.DEFAULT_KEYWORD_CHARS - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Keyword; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.name = '"%s"' % self.match - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - self.caseless = caseless - if caseless: - self.caselessmatch = matchString.upper() - identChars = identChars.upper() - self.identChars = set(identChars) - - def parseImpl( self, instring, loc, doActions=True ): - if self.caseless: - if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and - (loc == 0 or instring[loc-1].upper() not in self.identChars) ): - return loc+self.matchLen, self.match - else: - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and - (loc == 0 or instring[loc-1] not in self.identChars) ): - return loc+self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) - - def copy(self): - c = super(Keyword,self).copy() - c.identChars = Keyword.DEFAULT_KEYWORD_CHARS - return c - - @staticmethod - def setDefaultKeywordChars( chars ): - """Overrides the default Keyword chars - """ - Keyword.DEFAULT_KEYWORD_CHARS = chars - -class CaselessLiteral(Literal): - """ - Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. - - Example:: - OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] - - (Contrast with example for L{CaselessKeyword}.) - """ - def __init__( self, matchString ): - super(CaselessLiteral,self).__init__( matchString.upper() ) - # Preserve the defining literal. - self.returnString = matchString - self.name = "'%s'" % self.returnString - self.errmsg = "Expected " + self.name - - def parseImpl( self, instring, loc, doActions=True ): - if instring[ loc:loc+self.matchLen ].upper() == self.match: - return loc+self.matchLen, self.returnString - raise ParseException(instring, loc, self.errmsg, self) - -class CaselessKeyword(Keyword): - """ - Caseless version of L{Keyword}. - - Example:: - OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] - - (Contrast with example for L{CaselessLiteral}.) - """ - def __init__( self, matchString, identChars=None ): - super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) - - def parseImpl( self, instring, loc, doActions=True ): - if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ): - return loc+self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) - -class CloseMatch(Token): - """ - A variation on L{Literal} which matches "close" matches, that is, - strings with at most 'n' mismatching characters. 
C{CloseMatch} takes parameters: - - C{match_string} - string to be matched - - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match - - The results from a successful parse will contain the matched text from the input string and the following named results: - - C{mismatches} - a list of the positions within the match_string where mismatches were found - - C{original} - the original match_string used to compare against the input string - - If C{mismatches} is an empty list, then the match was an exact match. - - Example:: - patt = CloseMatch("ATCATCGAATGGA") - patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) - patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) - - # exact match - patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) - - # close match allowing up to 2 mismatches - patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2) - patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) - """ - def __init__(self, match_string, maxMismatches=1): - super(CloseMatch,self).__init__() - self.name = match_string - self.match_string = match_string - self.maxMismatches = maxMismatches - self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches) - self.mayIndexError = False - self.mayReturnEmpty = False - - def parseImpl( self, instring, loc, doActions=True ): - start = loc - instrlen = len(instring) - maxloc = start + len(self.match_string) - - if maxloc <= instrlen: - match_string = self.match_string - match_stringloc = 0 - mismatches = [] - maxMismatches = self.maxMismatches - - for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)): - src,mat = s_m - if src != mat: - mismatches.append(match_stringloc) - if len(mismatches) > maxMismatches: - break - else: - loc = match_stringloc + 1 - results = ParseResults([instring[start:loc]]) - results['original'] = self.match_string - results['mismatches'] = mismatches - return loc, results - - raise ParseException(instring, loc, self.errmsg, self) - - -class Word(Token): - """ - Token for matching words composed of allowed character sets. - Defined with string containing all allowed initial characters, - an optional string containing allowed body characters (if omitted, - defaults to the initial character set), and an optional minimum, - maximum, and/or exact length. The default value for C{min} is 1 (a - minimum value < 1 is not valid); the default values for C{max} and C{exact} - are 0, meaning no maximum or exact length restriction. An optional - C{excludeChars} parameter can list characters that might be found in - the input C{bodyChars} string; useful to define a word of all printables - except for one or two characters, for instance. - - L{srange} is useful for defining custom character set strings for defining - C{Word} expressions, using range notation from regular expression character sets. - - A common mistake is to use C{Word} to match a specific literal string, as in - C{Word("Address")}. Remember that C{Word} uses the string argument to define - I{sets} of matchable characters. This expression would match "Add", "AAA", - "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'. - To match an exact literal string, use L{Literal} or L{Keyword}. 
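To reinforce the point that C{Word} arguments are character I{sets}, a short illustrative sketch (see also the Example that follows)::

    from pyparsing import Word, srange, printables

    identifier = Word(srange("[a-zA-Z_]"), srange("[a-zA-Z0-9_]"))
    print(identifier.parseString("foo_bar42"))       # -> ['foo_bar42']

    csv_value = Word(printables, excludeChars=",")   # any non-whitespace except ','
    print(csv_value.parseString("a;b;c,rest"))       # -> ['a;b;c']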
- - pyparsing includes helper strings for building Words: - - L{alphas} - - L{nums} - - L{alphanums} - - L{hexnums} - - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.) - - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.) - - L{printables} (any non-whitespace character) - - Example:: - # a word composed of digits - integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) - - # a word with a leading capital, and zero or more lowercase - capital_word = Word(alphas.upper(), alphas.lower()) - - # hostnames are alphanumeric, with leading alpha, and '-' - hostname = Word(alphas, alphanums+'-') - - # roman numeral (not a strict parser, accepts invalid mix of characters) - roman = Word("IVXLCDM") - - # any string of non-whitespace characters, except for ',' - csv_value = Word(printables, excludeChars=",") - """ - def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ): - super(Word,self).__init__() - if excludeChars: - initChars = ''.join(c for c in initChars if c not in excludeChars) - if bodyChars: - bodyChars = ''.join(c for c in bodyChars if c not in excludeChars) - self.initCharsOrig = initChars - self.initChars = set(initChars) - if bodyChars : - self.bodyCharsOrig = bodyChars - self.bodyChars = set(bodyChars) - else: - self.bodyCharsOrig = initChars - self.bodyChars = set(initChars) - - self.maxSpecified = max > 0 - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.asKeyword = asKeyword - - if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0): - if self.bodyCharsOrig == self.initCharsOrig: - self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) - elif len(self.initCharsOrig) == 1: - self.reString = "%s[%s]*" % \ - (re.escape(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - else: - self.reString = "[%s][%s]*" % \ - (_escapeRegexRangeChars(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - if self.asKeyword: - self.reString = r"\b"+self.reString+r"\b" - try: - self.re = re.compile( self.reString ) - except Exception: - self.re = None - - def parseImpl( self, instring, loc, doActions=True ): - if self.re: - result = self.re.match(instring,loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - return loc, result.group() - - if not(instring[ loc ] in self.initChars): - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - instrlen = len(instring) - bodychars = self.bodyChars - maxloc = start + self.maxLen - maxloc = min( maxloc, instrlen ) - while loc < maxloc and instring[loc] in bodychars: - loc += 1 - - throwException = False - if loc - start < self.minLen: - throwException = True - if self.maxSpecified and loc < instrlen and instring[loc] in bodychars: - throwException = True - if self.asKeyword: - if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars): - throwException = True - - if throwException: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, 
instring[start:loc] - - def __str__( self ): - try: - return super(Word,self).__str__() - except Exception: - pass - - - if self.strRepr is None: - - def charsAsStr(s): - if len(s)>4: - return s[:4]+"..." - else: - return s - - if ( self.initCharsOrig != self.bodyCharsOrig ): - self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) ) - else: - self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) - - return self.strRepr - - -class Regex(Token): - r""" - Token for matching strings that match a given regular expression. - Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. - If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as - named parse results. - - Example:: - realnum = Regex(r"[+-]?\d+\.\d*") - date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)') - # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression - roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") - """ - compiledREtype = type(re.compile("[A-Z]")) - def __init__( self, pattern, flags=0): - """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags.""" - super(Regex,self).__init__() - - if isinstance(pattern, basestring): - if not pattern: - warnings.warn("null string passed to Regex; use Empty() instead", - SyntaxWarning, stacklevel=2) - - self.pattern = pattern - self.flags = flags - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % pattern, - SyntaxWarning, stacklevel=2) - raise - - elif isinstance(pattern, Regex.compiledREtype): - self.re = pattern - self.pattern = \ - self.reString = str(pattern) - self.flags = flags - - else: - raise ValueError("Regex may only be constructed with a string or a compiled RE object") - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - result = self.re.match(instring,loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - d = result.groupdict() - ret = ParseResults(result.group()) - if d: - for k in d: - ret[k] = d[k] - return loc,ret - - def __str__( self ): - try: - return super(Regex,self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "Re:(%s)" % repr(self.pattern) - - return self.strRepr - - -class QuotedString(Token): - r""" - Token for matching strings that are delimited by quoting characters. 
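A small sketch showing how C{Regex} named groups become named parse results, extending the docstring example above::

    from pyparsing import Regex

    date = Regex(r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)")
    result = date.parseString("2020-07-04")
    print(result["year"], result["month"], result["day"])   # -> 2020 07 04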
- - Defined with the following parameters: - - quoteChar - string of one or more characters defining the quote delimiting string - - escChar - character to escape quotes, typically backslash (default=C{None}) - - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None}) - - multiline - boolean indicating whether quotes can span multiple lines (default=C{False}) - - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True}) - - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar) - - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True}) - - Example:: - qs = QuotedString('"') - print(qs.searchString('lsjdf "This is the quote" sldjf')) - complex_qs = QuotedString('{{', endQuoteChar='}}') - print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf')) - sql_qs = QuotedString('"', escQuote='""') - print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) - prints:: - [['This is the quote']] - [['This is the "quote"']] - [['This is the quote with "embedded" quotes']] - """ - def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): - super(QuotedString,self).__init__() - - # remove white space from quote chars - wont work anyway - quoteChar = quoteChar.strip() - if not quoteChar: - warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) - raise SyntaxError() - - if endQuoteChar is None: - endQuoteChar = quoteChar - else: - endQuoteChar = endQuoteChar.strip() - if not endQuoteChar: - warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) - raise SyntaxError() - - self.quoteChar = quoteChar - self.quoteCharLen = len(quoteChar) - self.firstQuoteChar = quoteChar[0] - self.endQuoteChar = endQuoteChar - self.endQuoteCharLen = len(endQuoteChar) - self.escChar = escChar - self.escQuote = escQuote - self.unquoteResults = unquoteResults - self.convertWhitespaceEscapes = convertWhitespaceEscapes - - if multiline: - self.flags = re.MULTILINE | re.DOTALL - self.pattern = r'%s(?:[^%s%s]' % \ - ( re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) - else: - self.flags = 0 - self.pattern = r'%s(?:[^%s\n\r%s]' % \ - ( re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) - if len(self.endQuoteChar) > 1: - self.pattern += ( - '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]), - _escapeRegexRangeChars(self.endQuoteChar[i])) - for i in range(len(self.endQuoteChar)-1,0,-1)) + ')' - ) - if escQuote: - self.pattern += (r'|(?:%s)' % re.escape(escQuote)) - if escChar: - self.pattern += (r'|(?:%s.)' % re.escape(escChar)) - self.escCharReplacePattern = re.escape(self.escChar)+"(.)" - self.pattern += (r')*%s' % re.escape(self.endQuoteChar)) - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern, - SyntaxWarning, stacklevel=2) - raise - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = True - - def 
parseImpl( self, instring, loc, doActions=True ): - result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.group() - - if self.unquoteResults: - - # strip off quotes - ret = ret[self.quoteCharLen:-self.endQuoteCharLen] - - if isinstance(ret,basestring): - # replace escaped whitespace - if '\\' in ret and self.convertWhitespaceEscapes: - ws_map = { - r'\t' : '\t', - r'\n' : '\n', - r'\f' : '\f', - r'\r' : '\r', - } - for wslit,wschar in ws_map.items(): - ret = ret.replace(wslit, wschar) - - # replace escaped characters - if self.escChar: - ret = re.sub(self.escCharReplacePattern,r"\g<1>",ret) - - # replace escaped quotes - if self.escQuote: - ret = ret.replace(self.escQuote, self.endQuoteChar) - - return loc, ret - - def __str__( self ): - try: - return super(QuotedString,self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar) - - return self.strRepr - - -class CharsNotIn(Token): - """ - Token for matching words composed of characters I{not} in a given set (will - include whitespace in matched characters if not listed in the provided exclusion set - see example). - Defined with string containing all disallowed characters, and an optional - minimum, maximum, and/or exact length. The default value for C{min} is 1 (a - minimum value < 1 is not valid); the default values for C{max} and C{exact} - are 0, meaning no maximum or exact length restriction. - - Example:: - # define a comma-separated-value as anything that is not a ',' - csv_value = CharsNotIn(',') - print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) - prints:: - ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] - """ - def __init__( self, notChars, min=1, max=0, exact=0 ): - super(CharsNotIn,self).__init__() - self.skipWhitespace = False - self.notChars = notChars - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = ( self.minLen == 0 ) - self.mayIndexError = False - - def parseImpl( self, instring, loc, doActions=True ): - if instring[loc] in self.notChars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - notchars = self.notChars - maxlen = min( start+self.maxLen, len(instring) ) - while loc < maxlen and \ - (instring[loc] not in notchars): - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - def __str__( self ): - try: - return super(CharsNotIn, self).__str__() - except Exception: - pass - - if self.strRepr is None: - if len(self.notChars) > 4: - self.strRepr = "!W:(%s...)" % self.notChars[:4] - else: - self.strRepr = "!W:(%s)" % self.notChars - - return self.strRepr - -class White(Token): - """ - Special matching class for matching whitespace. Normally, whitespace is ignored - by pyparsing grammars. This class is included when some whitespace structures - are significant. Define with a string containing the whitespace characters to be - matched; default is C{" \\t\\r\\n"}. 
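An illustrative sketch of two C{QuotedString} configurations described above::

    from pyparsing import QuotedString

    sql_string = QuotedString("'", escQuote="''")
    print(sql_string.parseString("'it''s'"))      # escaped quote collapsed -> ["it's"]

    block = QuotedString("{{", endQuoteChar="}}")
    print(block.parseString("{{some text}}"))     # -> ['some text']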
Also takes optional C{min}, C{max}, and C{exact} arguments, - as defined for the C{L{Word}} class. - """ - whiteStrs = { - " " : "<SPC>", - "\t": "<TAB>", - "\n": "<LF>", - "\r": "<CR>", - "\f": "<FF>", - } - def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): - super(White,self).__init__() - self.matchWhite = ws - self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) ) - #~ self.leaveWhitespace() - self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite)) - self.mayReturnEmpty = True - self.errmsg = "Expected " + self.name - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - def parseImpl( self, instring, loc, doActions=True ): - if not(instring[ loc ] in self.matchWhite): - raise ParseException(instring, loc, self.errmsg, self) - start = loc - loc += 1 - maxloc = start + self.maxLen - maxloc = min( maxloc, len(instring) ) - while loc < maxloc and instring[loc] in self.matchWhite: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class _PositionToken(Token): - def __init__( self ): - super(_PositionToken,self).__init__() - self.name=self.__class__.__name__ - self.mayReturnEmpty = True - self.mayIndexError = False - -class GoToColumn(_PositionToken): - """ - Token to advance to a specific column of input text; useful for tabular report scraping. - """ - def __init__( self, colno ): - super(GoToColumn,self).__init__() - self.col = colno - - def preParse( self, instring, loc ): - if col(loc,instring) != self.col: - instrlen = len(instring) - if self.ignoreExprs: - loc = self._skipIgnorables( instring, loc ) - while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col : - loc += 1 - return loc - - def parseImpl( self, instring, loc, doActions=True ): - thiscol = col( loc, instring ) - if thiscol > self.col: - raise ParseException( instring, loc, "Text not in expected column", self ) - newloc = loc + self.col - thiscol - ret = instring[ loc: newloc ] - return newloc, ret - - -class LineStart(_PositionToken): - """ - Matches if current position is at the beginning of a line within the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (LineStart() + 'AAA' + restOfLine).searchString(test): - print(t) - - Prints:: - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - def __init__( self ): - super(LineStart,self).__init__() - self.errmsg = "Expected start of line" - - def parseImpl( self, instring, loc, doActions=True ): - if col(loc, instring) == 1: - return loc, [] - raise ParseException(instring, loc, self.errmsg, self) - -class LineEnd(_PositionToken): - """ - Matches if current position is at the end of a line within the parse string - """ - def __init__( self ): - super(LineEnd,self).__init__() - self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) - self.errmsg = "Expected end of line" - - def parseImpl( self, instring, loc, doActions=True ): - if loc<len(instring): - if instring[loc] == "\n": - return loc+1, "\n" - else: - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc+1, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - -class StringStart(_PositionToken): - """ - Matches if current position is at the beginning of 
the parse string - """ - def __init__( self ): - super(StringStart,self).__init__() - self.errmsg = "Expected start of text" - - def parseImpl( self, instring, loc, doActions=True ): - if loc != 0: - # see if entire string up to here is just whitespace and ignoreables - if loc != self.preParse( instring, 0 ): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - -class StringEnd(_PositionToken): - """ - Matches if current position is at the end of the parse string - """ - def __init__( self ): - super(StringEnd,self).__init__() - self.errmsg = "Expected end of text" - - def parseImpl( self, instring, loc, doActions=True ): - if loc < len(instring): - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc+1, [] - elif loc > len(instring): - return loc, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - -class WordStart(_PositionToken): - """ - Matches if the current position is at the beginning of a Word, and - is not preceded by any character in a given set of C{wordChars} - (default=C{printables}). To emulate the C{\b} behavior of regular expressions, - use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of - the string being parsed, or at the beginning of a line. - """ - def __init__(self, wordChars = printables): - super(WordStart,self).__init__() - self.wordChars = set(wordChars) - self.errmsg = "Not at the start of a word" - - def parseImpl(self, instring, loc, doActions=True ): - if loc != 0: - if (instring[loc-1] in self.wordChars or - instring[loc] not in self.wordChars): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - -class WordEnd(_PositionToken): - """ - Matches if the current position is at the end of a Word, and - is not followed by any character in a given set of C{wordChars} - (default=C{printables}). To emulate the C{\b} behavior of regular expressions, - use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of - the string being parsed, or at the end of a line. - """ - def __init__(self, wordChars = printables): - super(WordEnd,self).__init__() - self.wordChars = set(wordChars) - self.skipWhitespace = False - self.errmsg = "Not at the end of a word" - - def parseImpl(self, instring, loc, doActions=True ): - instrlen = len(instring) - if instrlen>0 and loc<instrlen: - if (instring[loc] in self.wordChars or - instring[loc-1] not in self.wordChars): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class ParseExpression(ParserElement): - """ - Abstract subclass of ParserElement, for combining and post-processing parsed tokens. 
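A short sketch of C{WordStart}/C{WordEnd} from above, emulating a regex C{\b}-style whole-word match::

    from pyparsing import WordStart, WordEnd, Literal, alphanums

    whole_cat = WordStart(alphanums) + Literal("cat") + WordEnd(alphanums)
    print(whole_cat.searchString("cat concat cats"))   # -> [['cat']]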
- """ - def __init__( self, exprs, savelist = False ): - super(ParseExpression,self).__init__(savelist) - if isinstance( exprs, _generatorType ): - exprs = list(exprs) - - if isinstance( exprs, basestring ): - self.exprs = [ ParserElement._literalStringClass( exprs ) ] - elif isinstance( exprs, collections.Iterable ): - exprs = list(exprs) - # if sequence of strings provided, wrap with Literal - if all(isinstance(expr, basestring) for expr in exprs): - exprs = map(ParserElement._literalStringClass, exprs) - self.exprs = list(exprs) - else: - try: - self.exprs = list( exprs ) - except TypeError: - self.exprs = [ exprs ] - self.callPreparse = False - - def __getitem__( self, i ): - return self.exprs[i] - - def append( self, other ): - self.exprs.append( other ) - self.strRepr = None - return self - - def leaveWhitespace( self ): - """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on - all contained expressions.""" - self.skipWhitespace = False - self.exprs = [ e.copy() for e in self.exprs ] - for e in self.exprs: - e.leaveWhitespace() - return self - - def ignore( self, other ): - if isinstance( other, Suppress ): - if other not in self.ignoreExprs: - super( ParseExpression, self).ignore( other ) - for e in self.exprs: - e.ignore( self.ignoreExprs[-1] ) - else: - super( ParseExpression, self).ignore( other ) - for e in self.exprs: - e.ignore( self.ignoreExprs[-1] ) - return self - - def __str__( self ): - try: - return super(ParseExpression,self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) ) - return self.strRepr - - def streamline( self ): - super(ParseExpression,self).streamline() - - for e in self.exprs: - e.streamline() - - # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d ) - # but only if there are no parse actions or resultsNames on the nested And's - # (likewise for Or's and MatchFirst's) - if ( len(self.exprs) == 2 ): - other = self.exprs[0] - if ( isinstance( other, self.__class__ ) and - not(other.parseAction) and - other.resultsName is None and - not other.debug ): - self.exprs = other.exprs[:] + [ self.exprs[1] ] - self.strRepr = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - other = self.exprs[-1] - if ( isinstance( other, self.__class__ ) and - not(other.parseAction) and - other.resultsName is None and - not other.debug ): - self.exprs = self.exprs[:-1] + other.exprs[:] - self.strRepr = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - self.errmsg = "Expected " + _ustr(self) - - return self - - def setResultsName( self, name, listAllMatches=False ): - ret = super(ParseExpression,self).setResultsName(name,listAllMatches) - return ret - - def validate( self, validateTrace=[] ): - tmp = validateTrace[:]+[self] - for e in self.exprs: - e.validate(tmp) - self.checkRecursion( [] ) - - def copy(self): - ret = super(ParseExpression,self).copy() - ret.exprs = [e.copy() for e in self.exprs] - return ret - -class And(ParseExpression): - """ - Requires all given C{ParseExpression}s to be found in the given order. - Expressions may be separated by whitespace. - May be constructed using the C{'+'} operator. - May also be constructed using the C{'-'} operator, which will suppress backtracking. 
- - Example:: - integer = Word(nums) - name_expr = OneOrMore(Word(alphas)) - - expr = And([integer("id"),name_expr("name"),integer("age")]) - # more easily written as: - expr = integer("id") + name_expr("name") + integer("age") - """ - - class _ErrorStop(Empty): - def __init__(self, *args, **kwargs): - super(And._ErrorStop,self).__init__(*args, **kwargs) - self.name = '-' - self.leaveWhitespace() - - def __init__( self, exprs, savelist = True ): - super(And,self).__init__(exprs, savelist) - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.setWhitespaceChars( self.exprs[0].whiteChars ) - self.skipWhitespace = self.exprs[0].skipWhitespace - self.callPreparse = True - - def parseImpl( self, instring, loc, doActions=True ): - # pass False as last arg to _parse for first element, since we already - # pre-parsed the string as part of our And pre-parsing - loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False ) - errorStop = False - for e in self.exprs[1:]: - if isinstance(e, And._ErrorStop): - errorStop = True - continue - if errorStop: - try: - loc, exprtokens = e._parse( instring, loc, doActions ) - except ParseSyntaxException: - raise - except ParseBaseException as pe: - pe.__traceback__ = None - raise ParseSyntaxException._from_exception(pe) - except IndexError: - raise ParseSyntaxException(instring, len(instring), self.errmsg, self) - else: - loc, exprtokens = e._parse( instring, loc, doActions ) - if exprtokens or exprtokens.haskeys(): - resultlist += exprtokens - return loc, resultlist - - def __iadd__(self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - return self.append( other ) #And( [ self, other ] ) - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - if not e.mayReturnEmpty: - break - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - -class Or(ParseExpression): - """ - Requires that at least one C{ParseExpression} is found. - If two expressions match, the expression that matches the longest string will be used. - May be constructed using the C{'^'} operator. - - Example:: - # construct Or using '^' operator - - number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) - print(number.searchString("123 3.1416 789")) - prints:: - [['123'], ['3.1416'], ['789']] - """ - def __init__( self, exprs, savelist = False ): - super(Or,self).__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - maxExcLoc = -1 - maxException = None - matches = [] - for e in self.exprs: - try: - loc2 = e.tryParse( instring, loc ) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring,len(instring),e.errmsg,self) - maxExcLoc = len(instring) - else: - # save match among all matches, to retry longest to shortest - matches.append((loc2, e)) - - if matches: - matches.sort(key=lambda x: -x[0]) - for _,e in matches: - try: - return e._parse( instring, loc, doActions ) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - - def __ixor__(self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - return self.append( other ) #Or( [ self, other ] ) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class MatchFirst(ParseExpression): - """ - Requires that at least one C{ParseExpression} is found. - If two expressions match, the first one listed is the one that will match. - May be constructed using the C{'|'} operator. - - Example:: - # construct MatchFirst using '|' operator - - # watch the order of expressions to match - number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) - print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] - - # put more selective expression first - number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) - print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] - """ - def __init__( self, exprs, savelist = False ): - super(MatchFirst,self).__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - maxExcLoc = -1 - maxException = None - for e in self.exprs: - try: - ret = e._parse( instring, loc, doActions ) - return ret - except ParseException as err: - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring,len(instring),e.errmsg,self) - maxExcLoc = len(instring) - - # only got here if no expression matched, raise exception for match that made it the furthest - else: - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - def __ior__(self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - return self.append( other ) #MatchFirst( [ self, other ] ) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class Each(ParseExpression): - """ - Requires all given C{ParseExpression}s to be found, but in any order. - Expressions may be separated by whitespace. - May be constructed using the C{'&'} operator. 
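A brief sketch contrasting C{MatchFirst} (C{'|'}) with C{Or} (C{'^'}), as discussed in the two docstrings above::

    from pyparsing import Word, nums, Combine

    integer = Word(nums)
    real = Combine(integer + "." + integer)

    print((integer | real).parseString("3.14"))   # first listed wins -> ['3']
    print((real | integer).parseString("3.14"))   # reordered -> ['3.14']
    print((integer ^ real).parseString("3.14"))   # longest match wins -> ['3.14']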
- - Example:: - color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") - shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") - integer = Word(nums) - shape_attr = "shape:" + shape_type("shape") - posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") - color_attr = "color:" + color("color") - size_attr = "size:" + integer("size") - - # use Each (using operator '&') to accept attributes in any order - # (shape and posn are required, color and size are optional) - shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) - - shape_spec.runTests(''' - shape: SQUARE color: BLACK posn: 100, 120 - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - color:GREEN size:20 shape:TRIANGLE posn:20,40 - ''' - ) - prints:: - shape: SQUARE color: BLACK posn: 100, 120 - ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - - color: BLACK - - posn: ['100', ',', '120'] - - x: 100 - - y: 120 - - shape: SQUARE - - - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - - color: BLUE - - posn: ['50', ',', '80'] - - x: 50 - - y: 80 - - shape: CIRCLE - - size: 50 - - - color: GREEN size: 20 shape: TRIANGLE posn: 20,40 - ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - - color: GREEN - - posn: ['20', ',', '40'] - - x: 20 - - y: 40 - - shape: TRIANGLE - - size: 20 - """ - def __init__( self, exprs, savelist = True ): - super(Each,self).__init__(exprs, savelist) - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = True - self.initExprGroups = True - - def parseImpl( self, instring, loc, doActions=True ): - if self.initExprGroups: - self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional)) - opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ] - opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)] - self.optionals = opt1 + opt2 - self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ] - self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ] - self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ] - self.required += self.multirequired - self.initExprGroups = False - tmpLoc = loc - tmpReqd = self.required[:] - tmpOpt = self.optionals[:] - matchOrder = [] - - keepMatching = True - while keepMatching: - tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired - failed = [] - for e in tmpExprs: - try: - tmpLoc = e.tryParse( instring, tmpLoc ) - except ParseException: - failed.append(e) - else: - matchOrder.append(self.opt1map.get(id(e),e)) - if e in tmpReqd: - tmpReqd.remove(e) - elif e in tmpOpt: - tmpOpt.remove(e) - if len(failed) == len(tmpExprs): - keepMatching = False - - if tmpReqd: - missing = ", ".join(_ustr(e) for e in tmpReqd) - raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing ) - - # add any unmatched Optionals, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt] - - resultlist = [] - for e in matchOrder: - loc,results = e._parse(instring,loc,doActions) - resultlist.append(results) - - finalResults = sum(resultlist, ParseResults([])) - return loc, finalResults - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " & 
".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class ParseElementEnhance(ParserElement): - """ - Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens. - """ - def __init__( self, expr, savelist=False ): - super(ParseElementEnhance,self).__init__(savelist) - if isinstance( expr, basestring ): - if issubclass(ParserElement._literalStringClass, Token): - expr = ParserElement._literalStringClass(expr) - else: - expr = ParserElement._literalStringClass(Literal(expr)) - self.expr = expr - self.strRepr = None - if expr is not None: - self.mayIndexError = expr.mayIndexError - self.mayReturnEmpty = expr.mayReturnEmpty - self.setWhitespaceChars( expr.whiteChars ) - self.skipWhitespace = expr.skipWhitespace - self.saveAsList = expr.saveAsList - self.callPreparse = expr.callPreparse - self.ignoreExprs.extend(expr.ignoreExprs) - - def parseImpl( self, instring, loc, doActions=True ): - if self.expr is not None: - return self.expr._parse( instring, loc, doActions, callPreParse=False ) - else: - raise ParseException("",loc,self.errmsg,self) - - def leaveWhitespace( self ): - self.skipWhitespace = False - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.leaveWhitespace() - return self - - def ignore( self, other ): - if isinstance( other, Suppress ): - if other not in self.ignoreExprs: - super( ParseElementEnhance, self).ignore( other ) - if self.expr is not None: - self.expr.ignore( self.ignoreExprs[-1] ) - else: - super( ParseElementEnhance, self).ignore( other ) - if self.expr is not None: - self.expr.ignore( self.ignoreExprs[-1] ) - return self - - def streamline( self ): - super(ParseElementEnhance,self).streamline() - if self.expr is not None: - self.expr.streamline() - return self - - def checkRecursion( self, parseElementList ): - if self in parseElementList: - raise RecursiveGrammarException( parseElementList+[self] ) - subRecCheckList = parseElementList[:] + [ self ] - if self.expr is not None: - self.expr.checkRecursion( subRecCheckList ) - - def validate( self, validateTrace=[] ): - tmp = validateTrace[:]+[self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion( [] ) - - def __str__( self ): - try: - return super(ParseElementEnhance,self).__str__() - except Exception: - pass - - if self.strRepr is None and self.expr is not None: - self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) ) - return self.strRepr - - -class FollowedBy(ParseElementEnhance): - """ - Lookahead matching of the given parse expression. C{FollowedBy} - does I{not} advance the parsing position within the input string, it only - verifies that the specified parse expression matches at the current - position. C{FollowedBy} always returns a null token list. 
- - Example:: - # use FollowedBy to match a label only if it is followed by a ':' - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - - OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint() - prints:: - [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] - """ - def __init__( self, expr ): - super(FollowedBy,self).__init__(expr) - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - self.expr.tryParse( instring, loc ) - return loc, [] - - -class NotAny(ParseElementEnhance): - """ - Lookahead to disallow matching with the given parse expression. C{NotAny} - does I{not} advance the parsing position within the input string, it only - verifies that the specified parse expression does I{not} match at the current - position. Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny} - always returns a null token list. May be constructed using the '~' operator. - - Example:: - - """ - def __init__( self, expr ): - super(NotAny,self).__init__(expr) - #~ self.leaveWhitespace() - self.skipWhitespace = False # do NOT use self.leaveWhitespace(), don't want to propagate to exprs - self.mayReturnEmpty = True - self.errmsg = "Found unwanted token, "+_ustr(self.expr) - - def parseImpl( self, instring, loc, doActions=True ): - if self.expr.canParseNext(instring, loc): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "~{" + _ustr(self.expr) + "}" - - return self.strRepr - -class _MultipleMatch(ParseElementEnhance): - def __init__( self, expr, stopOn=None): - super(_MultipleMatch, self).__init__(expr) - self.saveAsList = True - ender = stopOn - if isinstance(ender, basestring): - ender = ParserElement._literalStringClass(ender) - self.not_ender = ~ender if ender is not None else None - - def parseImpl( self, instring, loc, doActions=True ): - self_expr_parse = self.expr._parse - self_skip_ignorables = self._skipIgnorables - check_ender = self.not_ender is not None - if check_ender: - try_not_ender = self.not_ender.tryParse - - # must be at least one (but first see if we are the stopOn sentinel; - # if so, fail) - if check_ender: - try_not_ender(instring, loc) - loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False ) - try: - hasIgnoreExprs = (not not self.ignoreExprs) - while 1: - if check_ender: - try_not_ender(instring, loc) - if hasIgnoreExprs: - preloc = self_skip_ignorables( instring, loc ) - else: - preloc = loc - loc, tmptokens = self_expr_parse( instring, preloc, doActions ) - if tmptokens or tmptokens.haskeys(): - tokens += tmptokens - except (ParseException,IndexError): - pass - - return loc, tokens - -class OneOrMore(_MultipleMatch): - """ - Repetition of one or more of the given expression. - - Parameters: - - expr - expression that must match one or more times - - stopOn - (default=C{None}) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example:: - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) - - text = "shape: SQUARE posn: upper left color: BLACK" - OneOrMore(attr_expr).parseString(text).pprint() # Fail! 
read 'color' as data instead of next label -> [['shape', 'SQUARE color']] - - # use stopOn attribute for OneOrMore to avoid reading label string as part of the data - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] - - # could also be written as - (attr_expr * (1,)).parseString(text).pprint() - """ - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + _ustr(self.expr) + "}..." - - return self.strRepr - -class ZeroOrMore(_MultipleMatch): - """ - Optional repetition of zero or more of the given expression. - - Parameters: - - expr - expression that must match zero or more times - - stopOn - (default=C{None}) - expression for a terminating sentinel - (only required if the sentinel would ordinarily match the repetition - expression) - - Example: similar to L{OneOrMore} - """ - def __init__( self, expr, stopOn=None): - super(ZeroOrMore,self).__init__(expr, stopOn=stopOn) - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - try: - return super(ZeroOrMore, self).parseImpl(instring, loc, doActions) - except (ParseException,IndexError): - return loc, [] - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]..." - - return self.strRepr - -class _NullToken(object): - def __bool__(self): - return False - __nonzero__ = __bool__ - def __str__(self): - return "" - -_optionalNotMatched = _NullToken() -class Optional(ParseElementEnhance): - """ - Optional matching of the given expression. - - Parameters: - - expr - expression that must match zero or more times - - default (optional) - value to be returned if the optional expression is not found. - - Example:: - # US postal code can be a 5-digit zip, plus optional 4-digit qualifier - zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4))) - zip.runTests(''' - # traditional ZIP code - 12345 - - # ZIP+4 form - 12101-0001 - - # invalid ZIP - 98765- - ''') - prints:: - # traditional ZIP code - 12345 - ['12345'] - - # ZIP+4 form - 12101-0001 - ['12101-0001'] - - # invalid ZIP - 98765- - ^ - FAIL: Expected end of text (at char 5), (line:1, col:6) - """ - def __init__( self, expr, default=_optionalNotMatched ): - super(Optional,self).__init__( expr, savelist=False ) - self.saveAsList = self.expr.saveAsList - self.defaultValue = default - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - try: - loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False ) - except (ParseException,IndexError): - if self.defaultValue is not _optionalNotMatched: - if self.expr.resultsName: - tokens = ParseResults([ self.defaultValue ]) - tokens[self.expr.resultsName] = self.defaultValue - else: - tokens = [ self.defaultValue ] - else: - tokens = [] - return loc, tokens - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "[" + _ustr(self.expr) + "]" - - return self.strRepr - -class SkipTo(ParseElementEnhance): - """ - Token for skipping over all undefined text until the matched expression is found. 
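# [editor's sketch -- not part of the deleted file] The stopOn parameter shown in
# the OneOrMore docstring above keeps a greedy repetition from swallowing the next
# label. A second minimal example, assuming pyparsing 2.x; names are hypothetical:

from pyparsing import FollowedBy, Group, OneOrMore, Suppress, Word, alphanums, alphas

key = Word(alphas) + FollowedBy('=')
values = OneOrMore(Word(alphanums), stopOn=key)   # stop before the next key
pair = Group(key + Suppress('=') + values.setParseAction(' '.join))

print(OneOrMore(pair).parseString("env=prod us east app=web1").asList())
# -> [['env', 'prod us east'], ['app', 'web1']]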
- - Parameters: - - expr - target expression marking the end of the data to be skipped - - include - (default=C{False}) if True, the target expression is also parsed - (the skipped text and target expression are returned as a 2-element list). - - ignore - (default=C{None}) used to define grammars (typically quoted strings and - comments) that might contain false matches to the target expression - - failOn - (default=C{None}) define expressions that are not allowed to be - included in the skipped test; if found before the target expression is found, - the SkipTo is not a match - - Example:: - report = ''' - Outstanding Issues Report - 1 Jan 2000 - - # | Severity | Description | Days Open - -----+----------+-------------------------------------------+----------- - 101 | Critical | Intermittent system crash | 6 - 94 | Cosmetic | Spelling error on Login ('log|n') | 14 - 79 | Minor | System slow when running too many reports | 47 - ''' - integer = Word(nums) - SEP = Suppress('|') - # use SkipTo to simply match everything up until the next SEP - # - ignore quoted strings, so that a '|' character inside a quoted string does not match - # - parse action will call token.strip() for each matched token, i.e., the description body - string_data = SkipTo(SEP, ignore=quotedString) - string_data.setParseAction(tokenMap(str.strip)) - ticket_expr = (integer("issue_num") + SEP - + string_data("sev") + SEP - + string_data("desc") + SEP - + integer("days_open")) - - for tkt in ticket_expr.searchString(report): - print tkt.dump() - prints:: - ['101', 'Critical', 'Intermittent system crash', '6'] - - days_open: 6 - - desc: Intermittent system crash - - issue_num: 101 - - sev: Critical - ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] - - days_open: 14 - - desc: Spelling error on Login ('log|n') - - issue_num: 94 - - sev: Cosmetic - ['79', 'Minor', 'System slow when running too many reports', '47'] - - days_open: 47 - - desc: System slow when running too many reports - - issue_num: 79 - - sev: Minor - """ - def __init__( self, other, include=False, ignore=None, failOn=None ): - super( SkipTo, self ).__init__( other ) - self.ignoreExpr = ignore - self.mayReturnEmpty = True - self.mayIndexError = False - self.includeMatch = include - self.asList = False - if isinstance(failOn, basestring): - self.failOn = ParserElement._literalStringClass(failOn) - else: - self.failOn = failOn - self.errmsg = "No match found for "+_ustr(self.expr) - - def parseImpl( self, instring, loc, doActions=True ): - startloc = loc - instrlen = len(instring) - expr = self.expr - expr_parse = self.expr._parse - self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None - self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None - - tmploc = loc - while tmploc <= instrlen: - if self_failOn_canParseNext is not None: - # break if failOn expression matches - if self_failOn_canParseNext(instring, tmploc): - break - - if self_ignoreExpr_tryParse is not None: - # advance past ignore expressions - while 1: - try: - tmploc = self_ignoreExpr_tryParse(instring, tmploc) - except ParseBaseException: - break - - try: - expr_parse(instring, tmploc, doActions=False, callPreParse=False) - except (ParseException, IndexError): - # no match, advance loc in string - tmploc += 1 - else: - # matched skipto expr, done - break - - else: - # ran off the end of the input string without matching skipto expr, fail - raise ParseException(instring, loc, self.errmsg, self) - - # build up return 
values - loc = tmploc - skiptext = instring[startloc:loc] - skipresult = ParseResults(skiptext) - - if self.includeMatch: - loc, mat = expr_parse(instring,loc,doActions,callPreParse=False) - skipresult += mat - - return loc, skipresult - -class Forward(ParseElementEnhance): - """ - Forward declaration of an expression to be defined later - - used for recursive grammars, such as algebraic infix notation. - When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator. - - Note: take care when assigning to C{Forward} not to overlook precedence of operators. - Specifically, '|' has a lower precedence than '<<', so that:: - fwdExpr << a | b | c - will actually be evaluated as:: - (fwdExpr << a) | b | c - thereby leaving b and c out as parseable alternatives. It is recommended that you - explicitly group the values inserted into the C{Forward}:: - fwdExpr << (a | b | c) - Converting to use the '<<=' operator instead will avoid this problem. - - See L{ParseResults.pprint} for an example of a recursive parser created using - C{Forward}. - """ - def __init__( self, other=None ): - super(Forward,self).__init__( other, savelist=False ) - - def __lshift__( self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass(other) - self.expr = other - self.strRepr = None - self.mayIndexError = self.expr.mayIndexError - self.mayReturnEmpty = self.expr.mayReturnEmpty - self.setWhitespaceChars( self.expr.whiteChars ) - self.skipWhitespace = self.expr.skipWhitespace - self.saveAsList = self.expr.saveAsList - self.ignoreExprs.extend(self.expr.ignoreExprs) - return self - - def __ilshift__(self, other): - return self << other - - def leaveWhitespace( self ): - self.skipWhitespace = False - return self - - def streamline( self ): - if not self.streamlined: - self.streamlined = True - if self.expr is not None: - self.expr.streamline() - return self - - def validate( self, validateTrace=[] ): - if self not in validateTrace: - tmp = validateTrace[:]+[self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion([]) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - return self.__class__.__name__ + ": ..." - - # stubbed out for now - creates awful memory and perf issues - self._revertClass = self.__class__ - self.__class__ = _ForwardNoRecurse - try: - if self.expr is not None: - retString = _ustr(self.expr) - else: - retString = "None" - finally: - self.__class__ = self._revertClass - return self.__class__.__name__ + ": " + retString - - def copy(self): - if self.expr is not None: - return super(Forward,self).copy() - else: - ret = Forward() - ret <<= self - return ret - -class _ForwardNoRecurse(Forward): - def __str__( self ): - return "..." - -class TokenConverter(ParseElementEnhance): - """ - Abstract subclass of C{ParseExpression}, for converting parsed results. - """ - def __init__( self, expr, savelist=False ): - super(TokenConverter,self).__init__( expr )#, savelist ) - self.saveAsList = False - -class Combine(TokenConverter): - """ - Converter to concatenate all matching tokens to a single string. - By default, the matching patterns must also be contiguous in the input string; - this can be disabled by specifying C{'adjacent=False'} in the constructor. - - Example:: - real = Word(nums) + '.' + Word(nums) - print(real.parseString('3.1416')) # -> ['3', '.', '1416'] - # will also erroneously match the following - print(real.parseString('3. 
1416')) # -> ['3', '.', '1416'] - - real = Combine(Word(nums) + '.' + Word(nums)) - print(real.parseString('3.1416')) # -> ['3.1416'] - # no match when there are internal spaces - print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...) - """ - def __init__( self, expr, joinString="", adjacent=True ): - super(Combine,self).__init__( expr ) - # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself - if adjacent: - self.leaveWhitespace() - self.adjacent = adjacent - self.skipWhitespace = True - self.joinString = joinString - self.callPreparse = True - - def ignore( self, other ): - if self.adjacent: - ParserElement.ignore(self, other) - else: - super( Combine, self).ignore( other ) - return self - - def postParse( self, instring, loc, tokenlist ): - retToks = tokenlist.copy() - del retToks[:] - retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults) - - if self.resultsName and retToks.haskeys(): - return [ retToks ] - else: - return retToks - -class Group(TokenConverter): - """ - Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions. - - Example:: - ident = Word(alphas) - num = Word(nums) - term = ident | num - func = ident + Optional(delimitedList(term)) - print(func.parseString("fn a,b,100")) # -> ['fn', 'a', 'b', '100'] - - func = ident + Group(Optional(delimitedList(term))) - print(func.parseString("fn a,b,100")) # -> ['fn', ['a', 'b', '100']] - """ - def __init__( self, expr ): - super(Group,self).__init__( expr ) - self.saveAsList = True - - def postParse( self, instring, loc, tokenlist ): - return [ tokenlist ] - -class Dict(TokenConverter): - """ - Converter to return a repetitive expression as a list, but also as a dictionary. - Each element can also be referenced using the first token in the expression as its key. - Useful for tabular report scraping when the first column can be used as a item key. - - Example:: - data_word = Word(alphas) - label = data_word + FollowedBy(':') - attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join)) - - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - - # print attributes as plain groups - print(OneOrMore(attr_expr).parseString(text).dump()) - - # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names - result = Dict(OneOrMore(Group(attr_expr))).parseString(text) - print(result.dump()) - - # access named fields as dict entries, or output as dict - print(result['shape']) - print(result.asDict()) - prints:: - ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] - - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: light blue - - posn: upper left - - shape: SQUARE - - texture: burlap - SQUARE - {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} - See more examples at L{ParseResults} of accessing fields by results name. 
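# [editor's sketch -- not part of the deleted file] Forward, defined above, enables
# recursive grammars; '<<=' sidesteps the '<<' vs '|' precedence pitfall its
# docstring warns about. A minimal illustration, assuming pyparsing 2.x:

from pyparsing import Forward, Group, Suppress, Word, ZeroOrMore, nums

LPAR, RPAR = map(Suppress, '()')
expr = Forward()
expr <<= Word(nums) | Group(LPAR + ZeroOrMore(expr) + RPAR)

print(expr.parseString("(1 (2 3) 4)").asList())
# -> [['1', ['2', '3'], '4']]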
- """ - def __init__( self, expr ): - super(Dict,self).__init__( expr ) - self.saveAsList = True - - def postParse( self, instring, loc, tokenlist ): - for i,tok in enumerate(tokenlist): - if len(tok) == 0: - continue - ikey = tok[0] - if isinstance(ikey,int): - ikey = _ustr(tok[0]).strip() - if len(tok)==1: - tokenlist[ikey] = _ParseResultsWithOffset("",i) - elif len(tok)==2 and not isinstance(tok[1],ParseResults): - tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i) - else: - dictvalue = tok.copy() #ParseResults(i) - del dictvalue[0] - if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()): - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i) - else: - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i) - - if self.resultsName: - return [ tokenlist ] - else: - return tokenlist - - -class Suppress(TokenConverter): - """ - Converter for ignoring the results of a parsed expression. - - Example:: - source = "a, b, c,d" - wd = Word(alphas) - wd_list1 = wd + ZeroOrMore(',' + wd) - print(wd_list1.parseString(source)) - - # often, delimiters that are useful during parsing are just in the - # way afterward - use Suppress to keep them out of the parsed output - wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) - print(wd_list2.parseString(source)) - prints:: - ['a', ',', 'b', ',', 'c', ',', 'd'] - ['a', 'b', 'c', 'd'] - (See also L{delimitedList}.) - """ - def postParse( self, instring, loc, tokenlist ): - return [] - - def suppress( self ): - return self - - -class OnlyOnce(object): - """ - Wrapper for parse actions, to ensure they are only called once. - """ - def __init__(self, methodCall): - self.callable = _trim_arity(methodCall) - self.called = False - def __call__(self,s,l,t): - if not self.called: - results = self.callable(s,l,t) - self.called = True - return results - raise ParseException(s,l,"") - def reset(self): - self.called = False - -def traceParseAction(f): - """ - Decorator for debugging parse actions. - - When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".} - When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised. - - Example:: - wd = Word(alphas) - - @traceParseAction - def remove_duplicate_chars(tokens): - return ''.join(sorted(set(''.join(tokens))) - - wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) - print(wds.parseString("slkdjs sld sldd sdlf sdljf")) - prints:: - >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) - <<leaving remove_duplicate_chars (ret: 'dfjkls') - ['dfjkls'] - """ - f = _trim_arity(f) - def z(*paArgs): - thisFunc = f.__name__ - s,l,t = paArgs[-3:] - if len(paArgs)>3: - thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc - sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) ) - try: - ret = f(*paArgs) - except Exception as exc: - sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) ) - raise - sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) ) - return ret - try: - z.__name__ = f.__name__ - except AttributeError: - pass - return z - -# -# global helpers -# -def delimitedList( expr, delim=",", combine=False ): - """ - Helper to define a delimited list of expressions - the delimiter defaults to ','. 
- By default, the list elements and delimiters can have intervening whitespace, and - comments, but this can be overridden by passing C{combine=True} in the constructor. - If C{combine} is set to C{True}, the matching tokens are returned as a single token - string, with the delimiters included; otherwise, the matching tokens are returned - as a list of tokens, with the delimiters suppressed. - - Example:: - delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc'] - delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] - """ - dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..." - if combine: - return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName) - else: - return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName) - -def countedArray( expr, intExpr=None ): - """ - Helper to define a counted list of expressions. - This helper defines a pattern of the form:: - integer expr expr expr... - where the leading integer tells how many expr expressions follow. - The matched tokens returns the array of expr tokens as a list - the leading count token is suppressed. - - If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value. - - Example:: - countedArray(Word(alphas)).parseString('2 ab cd ef') # -> ['ab', 'cd'] - - # in this parser, the leading integer value is given in binary, - # '10' indicating that 2 values are in the array - binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2)) - countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef') # -> ['ab', 'cd'] - """ - arrayExpr = Forward() - def countFieldParseAction(s,l,t): - n = t[0] - arrayExpr << (n and Group(And([expr]*n)) or Group(empty)) - return [] - if intExpr is None: - intExpr = Word(nums).setParseAction(lambda t:int(t[0])) - else: - intExpr = intExpr.copy() - intExpr.setName("arrayLen") - intExpr.addParseAction(countFieldParseAction, callDuringTry=True) - return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...') - -def _flatten(L): - ret = [] - for i in L: - if isinstance(i,list): - ret.extend(_flatten(i)) - else: - ret.append(i) - return ret - -def matchPreviousLiteral(expr): - """ - Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks - for a 'repeat' of a previous expression. For example:: - first = Word(nums) - second = matchPreviousLiteral(first) - matchExpr = first + ":" + second - will match C{"1:1"}, but not C{"1:2"}. Because this matches a - previous literal, will also match the leading C{"1:1"} in C{"1:10"}. - If this is not desired, use C{matchPreviousExpr}. - Do I{not} use with packrat parsing enabled. - """ - rep = Forward() - def copyTokenToRepeater(s,l,t): - if t: - if len(t) == 1: - rep << t[0] - else: - # flatten t tokens - tflat = _flatten(t.asList()) - rep << And(Literal(tt) for tt in tflat) - else: - rep << Empty() - expr.addParseAction(copyTokenToRepeater, callDuringTry=True) - rep.setName('(prev) ' + _ustr(expr)) - return rep - -def matchPreviousExpr(expr): - """ - Helper to define an expression that is indirectly defined from - the tokens matched in a previous expression, that is, it looks - for a 'repeat' of a previous expression. For example:: - first = Word(nums) - second = matchPreviousExpr(first) - matchExpr = first + ":" + second - will match C{"1:1"}, but not C{"1:2"}. 
Because this matches by - expressions, will I{not} match the leading C{"1:1"} in C{"1:10"}; - the expressions are evaluated first, and then compared, so - C{"1"} is compared with C{"10"}. - Do I{not} use with packrat parsing enabled. - """ - rep = Forward() - e2 = expr.copy() - rep <<= e2 - def copyTokenToRepeater(s,l,t): - matchTokens = _flatten(t.asList()) - def mustMatchTheseTokens(s,l,t): - theseTokens = _flatten(t.asList()) - if theseTokens != matchTokens: - raise ParseException("",0,"") - rep.setParseAction( mustMatchTheseTokens, callDuringTry=True ) - expr.addParseAction(copyTokenToRepeater, callDuringTry=True) - rep.setName('(prev) ' + _ustr(expr)) - return rep - -def _escapeRegexRangeChars(s): - #~ escape these chars: ^-] - for c in r"\^-]": - s = s.replace(c,_bslash+c) - s = s.replace("\n",r"\n") - s = s.replace("\t",r"\t") - return _ustr(s) - -def oneOf( strs, caseless=False, useRegex=True ): - """ - Helper to quickly define a set of alternative Literals, and makes sure to do - longest-first testing when there is a conflict, regardless of the input order, - but returns a C{L{MatchFirst}} for best performance. - - Parameters: - - strs - a string of space-delimited literals, or a collection of string literals - - caseless - (default=C{False}) - treat all literals as caseless - - useRegex - (default=C{True}) - as an optimization, will generate a Regex - object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or - if creating a C{Regex} raises an exception) - - Example:: - comp_oper = oneOf("< = > <= >= !=") - var = Word(alphas) - number = Word(nums) - term = var | number - comparison_expr = term + comp_oper + term - print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12")) - prints:: - [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] - """ - if caseless: - isequal = ( lambda a,b: a.upper() == b.upper() ) - masks = ( lambda a,b: b.upper().startswith(a.upper()) ) - parseElementClass = CaselessLiteral - else: - isequal = ( lambda a,b: a == b ) - masks = ( lambda a,b: b.startswith(a) ) - parseElementClass = Literal - - symbols = [] - if isinstance(strs,basestring): - symbols = strs.split() - elif isinstance(strs, collections.Iterable): - symbols = list(strs) - else: - warnings.warn("Invalid argument to oneOf, expected string or iterable", - SyntaxWarning, stacklevel=2) - if not symbols: - return NoMatch() - - i = 0 - while i < len(symbols)-1: - cur = symbols[i] - for j,other in enumerate(symbols[i+1:]): - if ( isequal(other, cur) ): - del symbols[i+j+1] - break - elif ( masks(cur, other) ): - del symbols[i+j+1] - symbols.insert(i,other) - cur = other - break - else: - i += 1 - - if not caseless and useRegex: - #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] )) - try: - if len(symbols)==len("".join(symbols)): - return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols)) - else: - return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols)) - except Exception: - warnings.warn("Exception creating Regex for oneOf, building MatchFirst", - SyntaxWarning, stacklevel=2) - - - # last resort, just use MatchFirst - return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols)) - -def dictOf( key, value ): - """ - Helper to easily and clearly define a dictionary by specifying the respective patterns - for the key and value. 
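# [editor's sketch -- not part of the deleted file] oneOf, defined above, reorders
# its alternatives so that longer literals are tested before their prefixes.
# A minimal illustration, assuming pyparsing 2.x:

from pyparsing import Word, alphas, oneOf

comp = oneOf("< > <= >= = !=")            # '<=' is tried before '<'
expr = Word(alphas) + comp + Word(alphas)
print(expr.parseString("aa <= bb"))       # -> ['aa', '<=', 'bb']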
Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens - in the proper order. The key pattern can include delimiting markers or punctuation, - as long as they are suppressed, thereby leaving the significant key text. The value - pattern can include named results, so that the C{Dict} results can include named token - fields. - - Example:: - text = "shape: SQUARE posn: upper left color: light blue texture: burlap" - attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)) - print(OneOrMore(attr_expr).parseString(text).dump()) - - attr_label = label - attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join) - - # similar to Dict, but simpler call format - result = dictOf(attr_label, attr_value).parseString(text) - print(result.dump()) - print(result['shape']) - print(result.shape) # object attribute access works too - print(result.asDict()) - prints:: - [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] - - color: light blue - - posn: upper left - - shape: SQUARE - - texture: burlap - SQUARE - SQUARE - {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} - """ - return Dict( ZeroOrMore( Group ( key + value ) ) ) - -def originalTextFor(expr, asString=True): - """ - Helper to return the original, untokenized text for a given expression. Useful to - restore the parsed fields of an HTML start tag into the raw tag text itself, or to - revert separate tokens with intervening whitespace back to the original matching - input text. By default, returns astring containing the original parsed text. - - If the optional C{asString} argument is passed as C{False}, then the return value is a - C{L{ParseResults}} containing any results names that were originally matched, and a - single token containing the original matched text from the input string. So if - the expression passed to C{L{originalTextFor}} contains expressions with defined - results names, you must set C{asString} to C{False} if you want to preserve those - results name values. - - Example:: - src = "this is test <b> bold <i>text</i> </b> normal text " - for tag in ("b","i"): - opener,closer = makeHTMLTags(tag) - patt = originalTextFor(opener + SkipTo(closer) + closer) - print(patt.searchString(src)[0]) - prints:: - ['<b> bold <i>text</i> </b>'] - ['<i>text</i>'] - """ - locMarker = Empty().setParseAction(lambda s,loc,t: loc) - endlocMarker = locMarker.copy() - endlocMarker.callPreparse = False - matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") - if asString: - extractText = lambda s,l,t: s[t._original_start:t._original_end] - else: - def extractText(s,l,t): - t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]] - matchExpr.setParseAction(extractText) - matchExpr.ignoreExprs = expr.ignoreExprs - return matchExpr - -def ungroup(expr): - """ - Helper to undo pyparsing's default grouping of And expressions, even - if all but one are non-empty. - """ - return TokenConverter(expr).setParseAction(lambda t:t[0]) - -def locatedExpr(expr): - """ - Helper to decorate a returned token with its starting and ending locations in the input string. 
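# [editor's sketch -- not part of the deleted file] originalTextFor, defined above,
# recovers the raw matched slice of the input. A minimal illustration, assuming
# pyparsing 2.x (makeHTMLTags is defined later in this same module):

from pyparsing import SkipTo, makeHTMLTags, originalTextFor

b, b_end = makeHTMLTags("b")
patt = originalTextFor(b + SkipTo(b_end) + b_end)
print(patt.searchString("before <b>bold <i>text</i></b> after")[0])
# -> ['<b>bold <i>text</i></b>']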
- This helper adds the following results names: - - locn_start = location where matched expression begins - - locn_end = location where matched expression ends - - value = the actual parsed results - - Be careful if the input text contains C{<TAB>} characters, you may want to call - C{L{ParserElement.parseWithTabs}} - - Example:: - wd = Word(alphas) - for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): - print(match) - prints:: - [[0, 'ljsdf', 5]] - [[8, 'lksdjjf', 15]] - [[18, 'lkkjj', 23]] - """ - locator = Empty().setParseAction(lambda s,l,t: l) - return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) - - -# convenience constants for positional expressions -empty = Empty().setName("empty") -lineStart = LineStart().setName("lineStart") -lineEnd = LineEnd().setName("lineEnd") -stringStart = StringStart().setName("stringStart") -stringEnd = StringEnd().setName("stringEnd") - -_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1]) -_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16))) -_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8))) -_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE) -_charRange = Group(_singleChar + Suppress("-") + _singleChar) -_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" - -def srange(s): - r""" - Helper to easily define string ranges for use in Word construction. Borrows - syntax from regexp '[]' string range definitions:: - srange("[0-9]") -> "0123456789" - srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" - srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" - The input string must be enclosed in []'s, and the returned string is the expanded - character set joined into a single string. - The values enclosed in the []'s may be: - - a single character - - an escaped character with a leading backslash (such as C{\-} or C{\]}) - - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) - (C{\0x##} is also supported for backwards compatibility) - - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character) - - a range of any of the above, separated by a dash (C{'a-z'}, etc.) - - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.) - """ - _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1)) - try: - return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body) - except Exception: - return "" - -def matchOnlyAtCol(n): - """ - Helper method for defining parse actions that require matching at a specific - column in the input text. - """ - def verifyCol(strg,locn,toks): - if col(locn,strg) != n: - raise ParseException(strg,locn,"matched token not at column %d" % n) - return verifyCol - -def replaceWith(replStr): - """ - Helper method for common parse actions that simply return a literal value. Especially - useful when used with C{L{transformString<ParserElement.transformString>}()}. 
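# [editor's sketch -- not part of the deleted file] srange, defined above, expands a
# regex-style character class into a plain string of characters for Word. A minimal
# illustration, assuming pyparsing 2.x:

from pyparsing import Word, srange

print(srange("[a-e]"))                     # -> abcde
hexword = Word(srange("[0-9a-fA-F]"))
print(hexword.parseString("deadBEEF01"))   # -> ['deadBEEF01']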
- - Example:: - num = Word(nums).setParseAction(lambda toks: int(toks[0])) - na = oneOf("N/A NA").setParseAction(replaceWith(math.nan)) - term = na | num - - OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234] - """ - return lambda s,l,t: [replStr] - -def removeQuotes(s,l,t): - """ - Helper parse action for removing quotation marks from parsed quoted strings. - - Example:: - # by default, quotation marks are included in parsed results - quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"] - - # use removeQuotes to strip quotation marks from parsed results - quotedString.setParseAction(removeQuotes) - quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"] - """ - return t[0][1:-1] - -def tokenMap(func, *args): - """ - Helper to define a parse action by mapping a function to all elements of a ParseResults list.If any additional - args are passed, they are forwarded to the given function as additional arguments after - the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the - parsed data to an integer using base 16. - - Example (compare the last to example in L{ParserElement.transformString}:: - hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16)) - hex_ints.runTests(''' - 00 11 22 aa FF 0a 0d 1a - ''') - - upperword = Word(alphas).setParseAction(tokenMap(str.upper)) - OneOrMore(upperword).runTests(''' - my kingdom for a horse - ''') - - wd = Word(alphas).setParseAction(tokenMap(str.title)) - OneOrMore(wd).setParseAction(' '.join).runTests(''' - now is the winter of our discontent made glorious summer by this sun of york - ''') - prints:: - 00 11 22 aa FF 0a 0d 1a - [0, 17, 34, 170, 255, 10, 13, 26] - - my kingdom for a horse - ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] - - now is the winter of our discontent made glorious summer by this sun of york - ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] - """ - def pa(s,l,t): - return [func(tokn, *args) for tokn in t] - - try: - func_name = getattr(func, '__name__', - getattr(func, '__class__').__name__) - except Exception: - func_name = str(func) - pa.__name__ = func_name - - return pa - -upcaseTokens = tokenMap(lambda t: _ustr(t).upper()) -"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}""" - -downcaseTokens = tokenMap(lambda t: _ustr(t).lower()) -"""(Deprecated) Helper parse action to convert tokens to lower case. 
Deprecated in favor of L{pyparsing_common.downcaseTokens}""" - -def _makeTags(tagStr, xml): - """Internal helper to construct opening and closing tag expressions, given a tag name""" - if isinstance(tagStr,basestring): - resname = tagStr - tagStr = Keyword(tagStr, caseless=not xml) - else: - resname = tagStr.name - - tagAttrName = Word(alphas,alphanums+"_-:") - if (xml): - tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes ) - openTag = Suppress("<") + tagStr("tag") + \ - Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \ - Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") - else: - printablesLessRAbrack = "".join(c for c in printables if c not in ">") - tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack) - openTag = Suppress("<") + tagStr("tag") + \ - Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \ - Optional( Suppress("=") + tagAttrValue ) ))) + \ - Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">") - closeTag = Combine(_L("</") + tagStr + ">") - - openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname) - closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname) - openTag.tag = resname - closeTag.tag = resname - return openTag, closeTag - -def makeHTMLTags(tagStr): - """ - Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches - tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values. - - Example:: - text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>' - # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple - a,a_end = makeHTMLTags("A") - link_expr = a + SkipTo(a_end)("link_text") + a_end - - for link in link_expr.searchString(text): - # attributes in the <A> tag (like "href" shown here) are also accessible as named results - print(link.link_text, '->', link.href) - prints:: - pyparsing -> http://pyparsing.wikispaces.com - """ - return _makeTags( tagStr, False ) - -def makeXMLTags(tagStr): - """ - Helper to construct opening and closing tag expressions for XML, given a tag name. Matches - tags only in the given upper/lower case. - - Example: similar to L{makeHTMLTags} - """ - return _makeTags( tagStr, True ) - -def withAttribute(*args,**attrDict): - """ - Helper to create a validating parse action to be used with start tags created - with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag - with a required attribute value, to avoid false matches on common tags such as - C{<TD>} or C{<DIV>}. - - Call C{withAttribute} with a series of attribute names and values. Specify the list - of filter attributes names and values as: - - keyword arguments, as in C{(align="right")}, or - - as an explicit dict with C{**} operator, when an attribute name is also a Python - reserved word, as in C{**{"class":"Customer", "align":"right"}} - - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") ) - For attribute names with a namespace prefix, you must use the second form. Attribute - names are matched insensitive to upper/lower case. - - If just testing for C{class} (with or without a namespace), use C{L{withClass}}. 
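# [editor's sketch -- not part of the deleted file] Tag attributes matched by
# makeHTMLTags, defined above, become named results on the match. A minimal
# illustration, assuming pyparsing 2.x:

from pyparsing import makeHTMLTags

img, _ = makeHTMLTags("img")
for match in img.searchString('<img src="x.png" alt="logo"> <img src="y.jpg">'):
    print(match.src, match.alt)
# -> x.png logo
# -> y.jpg      (alt is an empty string when the attribute is absent)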
- - To verify that the attribute exists, but without specifying a value, pass - C{withAttribute.ANY_VALUE} as the value. - - Example:: - html = ''' - <div> - Some text - <div type="grid">1 4 0 1 0</div> - <div type="graph">1,3 2,3 1,1</div> - <div>this has no type</div> - </div> - - ''' - div,div_end = makeHTMLTags("div") - - # only match div tag having a type attribute with value "grid" - div_grid = div().setParseAction(withAttribute(type="grid")) - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.searchString(html): - print(grid_header.body) - - # construct a match with any div tag having a type attribute, regardless of the value - div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.searchString(html): - print(div_header.body) - prints:: - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - if args: - attrs = args[:] - else: - attrs = attrDict.items() - attrs = [(k,v) for k,v in attrs] - def pa(s,l,tokens): - for attrName,attrValue in attrs: - if attrName not in tokens: - raise ParseException(s,l,"no matching attribute " + attrName) - if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: - raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % - (attrName, tokens[attrName], attrValue)) - return pa -withAttribute.ANY_VALUE = object() - -def withClass(classname, namespace=''): - """ - Simplified version of C{L{withAttribute}} when matching on a div class - made - difficult because C{class} is a reserved word in Python. - - Example:: - html = ''' - <div> - Some text - <div class="grid">1 4 0 1 0</div> - <div class="graph">1,3 2,3 1,1</div> - <div>this <div> has no class</div> - </div> - - ''' - div,div_end = makeHTMLTags("div") - div_grid = div().setParseAction(withClass("grid")) - - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.searchString(html): - print(grid_header.body) - - div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.searchString(html): - print(div_header.body) - prints:: - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - classattr = "%s:class" % namespace if namespace else "class" - return withAttribute(**{classattr : classname}) - -opAssoc = _Constants() -opAssoc.LEFT = object() -opAssoc.RIGHT = object() - -def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): - """ - Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary or - binary, left- or right-associative. Parse actions can also be attached - to operator expressions. The generated parser will also recognize the use - of parentheses to override operator precedences (see example below). - - Note: if you define a deep operator list, you may see performance issues - when using infixNotation. See L{ParserElement.enablePackrat} for a - mechanism to potentially improve your parser performance. 
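# [editor's sketch -- not part of the deleted file] withAttribute, defined above,
# filters tag matches by attribute value via a parse action. A minimal
# illustration, assuming pyparsing 2.x:

from pyparsing import SkipTo, makeHTMLTags, withAttribute

div, div_end = makeHTMLTags("div")
div_grid = div.copy().setParseAction(withAttribute(type="grid"))
grid = div_grid + SkipTo(div_end)("body") + div_end

html = '<div type="grid">1 4 0</div> <div type="graph">x y</div>'
print(grid.searchString(html)[0].body)   # -> 1 4 0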
- - Parameters: - - baseExpr - expression representing the most basic element for the nested - - opList - list of tuples, one for each operator precedence level in the - expression grammar; each tuple is of the form - (opExpr, numTerms, rightLeftAssoc, parseAction), where: - - opExpr is the pyparsing expression for the operator; - may also be a string, which will be converted to a Literal; - if numTerms is 3, opExpr is a tuple of two expressions, for the - two operators separating the 3 terms - - numTerms is the number of terms for this operator (must - be 1, 2, or 3) - - rightLeftAssoc is the indicator whether the operator is - right or left associative, using the pyparsing-defined - constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. - - parseAction is the parse action to be associated with - expressions matching this operator expression (the - parse action tuple member may be omitted) - - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) - - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) - - Example:: - # simple example of four-function arithmetic with ints and variable names - integer = pyparsing_common.signed_integer - varname = pyparsing_common.identifier - - arith_expr = infixNotation(integer | varname, - [ - ('-', 1, opAssoc.RIGHT), - (oneOf('* /'), 2, opAssoc.LEFT), - (oneOf('+ -'), 2, opAssoc.LEFT), - ]) - - arith_expr.runTests(''' - 5+3*6 - (5+3)*6 - -2--11 - ''', fullDump=False) - prints:: - 5+3*6 - [[5, '+', [3, '*', 6]]] - - (5+3)*6 - [[[5, '+', 3], '*', 6]] - - -2--11 - [[['-', 2], '-', ['-', 11]]] - """ - ret = Forward() - lastExpr = baseExpr | ( lpar + ret + rpar ) - for i,operDef in enumerate(opList): - opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] - termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr - if arity == 3: - if opExpr is None or len(opExpr) != 2: - raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") - opExpr1, opExpr2 = opExpr - thisExpr = Forward().setName(termName) - if rightLeftAssoc == opAssoc.LEFT: - if arity == 1: - matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) - elif arity == 2: - if opExpr is not None: - matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) - else: - matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) - elif arity == 3: - matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ - Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - elif rightLeftAssoc == opAssoc.RIGHT: - if arity == 1: - # try to avoid LR with this extra test - if not isinstance(opExpr, Optional): - opExpr = Optional(opExpr) - matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) - elif arity == 2: - if opExpr is not None: - matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) - else: - matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) - elif arity == 3: - matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ - Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - else: - raise ValueError("operator must indicate right or left associativity") - if pa: - matchExpr.setParseAction( pa ) - thisExpr <<= ( 
matchExpr.setName(termName) | lastExpr ) - lastExpr = thisExpr - ret <<= lastExpr - return ret - -operatorPrecedence = infixNotation -"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release.""" - -dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") -sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") -quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'| - Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes") -unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") - -def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): - """ - Helper method for defining nested lists enclosed in opening and closing - delimiters ("(" and ")" are the default). - - Parameters: - - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression - - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression - - content - expression for items within the nested lists (default=C{None}) - - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) - - If an expression is not provided for the content argument, the nested - expression will capture all whitespace-delimited content between delimiters - as a list of separate values. - - Use the C{ignoreExpr} argument to define expressions that may contain - opening or closing characters that should not be treated as opening - or closing characters for nesting, such as quotedString or a comment - expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. - The default is L{quotedString}, but if no expressions are to be ignored, - then pass C{None} for this argument. 
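# [editor's sketch -- not part of the deleted file] infixNotation, defined above,
# builds a precedence-climbing expression parser from a table of operators.
# A minimal illustration, assuming pyparsing 2.x:

from pyparsing import Word, infixNotation, nums, oneOf, opAssoc

integer = Word(nums).setParseAction(lambda t: int(t[0]))
arith = infixNotation(integer, [
    (oneOf('* /'), 2, opAssoc.LEFT),
    (oneOf('+ -'), 2, opAssoc.LEFT),
])
print(arith.parseString("1+2*3").asList())   # -> [[1, '+', [2, '*', 3]]]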
- - Example:: - data_type = oneOf("void int short long char float double") - decl_data_type = Combine(data_type + Optional(Word('*'))) - ident = Word(alphas+'_', alphanums+'_') - number = pyparsing_common.number - arg = Group(decl_data_type + ident) - LPAR,RPAR = map(Suppress, "()") - - code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment)) - - c_function = (decl_data_type("type") - + ident("name") - + LPAR + Optional(delimitedList(arg), [])("args") + RPAR - + code_body("body")) - c_function.ignore(cStyleComment) - - source_code = ''' - int is_odd(int x) { - return (x%2); - } - - int dec_to_hex(char hchar) { - if (hchar >= '0' && hchar <= '9') { - return (ord(hchar)-ord('0')); - } else { - return (10+ord(hchar)-ord('A')); - } - } - ''' - for func in c_function.searchString(source_code): - print("%(name)s (%(type)s) args: %(args)s" % func) - - prints:: - is_odd (int) args: [['int', 'x']] - dec_to_hex (int) args: [['char', 'hchar']] - """ - if opener == closer: - raise ValueError("opening and closing strings cannot be the same") - if content is None: - if isinstance(opener,basestring) and isinstance(closer,basestring): - if len(opener) == 1 and len(closer)==1: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr + - CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) - else: - content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS - ).setParseAction(lambda t:t[0].strip())) - else: - if ignoreExpr is not None: - content = (Combine(OneOrMore(~ignoreExpr + - ~Literal(opener) + ~Literal(closer) + - CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) - else: - content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) + - CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1)) - ).setParseAction(lambda t:t[0].strip())) - else: - raise ValueError("opening and closing arguments must be strings if no content expression is given") - ret = Forward() - if ignoreExpr is not None: - ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) ) - else: - ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) ) - ret.setName('nested %s%s expression' % (opener,closer)) - return ret - -def indentedBlock(blockStatementExpr, indentStack, indent=True): - """ - Helper method for defining space-delimited indentation blocks, such as - those used to define block statements in Python source code. - - Parameters: - - blockStatementExpr - expression defining syntax of statement that - is repeated within the indented block - - indentStack - list created by caller to manage indentation stack - (multiple statementWithIndentedBlock expressions within a single grammar - should share a common indentStack) - - indent - boolean indicating whether block must be indented beyond the - the current level; set to False for block of left-most statements - (default=C{True}) - - A valid block must contain at least one C{blockStatement}. 
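# [editor's sketch -- not part of the deleted file] nestedExpr, defined above,
# parses delimiter-nested lists with no explicit content expression needed.
# A minimal illustration, assuming pyparsing 2.x:

from pyparsing import nestedExpr

print(nestedExpr('(', ')').parseString("(a (b c) d)").asList())
# -> [['a', ['b', 'c'], 'd']]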
- - Example:: - data = ''' - def A(z): - A1 - B = 100 - G = A2 - A2 - A3 - B - def BB(a,b,c): - BB1 - def BBA(): - bba1 - bba2 - bba3 - C - D - def spam(x,y): - def eggs(z): - pass - ''' - - - indentStack = [1] - stmt = Forward() - - identifier = Word(alphas, alphanums) - funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") - func_body = indentedBlock(stmt, indentStack) - funcDef = Group( funcDecl + func_body ) - - rvalue = Forward() - funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") - rvalue << (funcCall | identifier | Word(nums)) - assignment = Group(identifier + "=" + rvalue) - stmt << ( funcDef | assignment | identifier ) - - module_body = OneOrMore(stmt) - - parseTree = module_body.parseString(data) - parseTree.pprint() - prints:: - [['def', - 'A', - ['(', 'z', ')'], - ':', - [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], - 'B', - ['def', - 'BB', - ['(', 'a', 'b', 'c', ')'], - ':', - [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], - 'C', - 'D', - ['def', - 'spam', - ['(', 'x', 'y', ')'], - ':', - [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] - """ - def checkPeerIndent(s,l,t): - if l >= len(s): return - curCol = col(l,s) - if curCol != indentStack[-1]: - if curCol > indentStack[-1]: - raise ParseFatalException(s,l,"illegal nesting") - raise ParseException(s,l,"not a peer entry") - - def checkSubIndent(s,l,t): - curCol = col(l,s) - if curCol > indentStack[-1]: - indentStack.append( curCol ) - else: - raise ParseException(s,l,"not a subentry") - - def checkUnindent(s,l,t): - if l >= len(s): return - curCol = col(l,s) - if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): - raise ParseException(s,l,"not an unindent") - indentStack.pop() - - NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) - INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') - PEER = Empty().setParseAction(checkPeerIndent).setName('') - UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') - if indent: - smExpr = Group( Optional(NL) + - #~ FollowedBy(blockStatementExpr) + - INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) - else: - smExpr = Group( Optional(NL) + - (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) - blockStatementExpr.ignore(_bslash + LineEnd()) - return smExpr.setName('indented block') - -alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") -punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") - -anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) -_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) -commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") -def replaceHTMLEntity(t): - """Helper parser action to replace common HTML entities with their special characters""" - return _htmlEntityMap.get(t.entity) - -# it's easy to get these comment structures wrong - they're very common, so may as well make them available -cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") -"Comment of the form C{/* ... */}" - -htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment") -"Comment of the form C{<!-- ... -->}" - -restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") -dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") -"Comment of the form C{// ... 
(to end of line)}" - -cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") -"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}" - -javaStyleComment = cppStyleComment -"Same as C{L{cppStyleComment}}" - -pythonStyleComment = Regex(r"#.*").setName("Python style comment") -"Comment of the form C{# ... (to end of line)}" - -_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + - Optional( Word(" \t") + - ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") -commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") -"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. - This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}.""" - -# some other useful expressions - using lower-case class name since we are really using this as a namespace -class pyparsing_common: - """ - Here are some common low-level expressions that may be useful in jump-starting parser development: - - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>}) - - common L{programming identifiers<identifier>} - - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>}) - - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>} - - L{UUID<uuid>} - - L{comma-separated list<comma_separated_list>} - Parse actions: - - C{L{convertToInteger}} - - C{L{convertToFloat}} - - C{L{convertToDate}} - - C{L{convertToDatetime}} - - C{L{stripHTMLTags}} - - C{L{upcaseTokens}} - - C{L{downcaseTokens}} - - Example:: - pyparsing_common.number.runTests(''' - # any int or real number, returned as the appropriate type - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.fnumber.runTests(''' - # any int or real number, returned as float - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.hex_integer.runTests(''' - # hex numbers - 100 - FF - ''') - - pyparsing_common.fraction.runTests(''' - # fractions - 1/2 - -3/4 - ''') - - pyparsing_common.mixed_integer.runTests(''' - # mixed fractions - 1 - 1/2 - -3/4 - 1-3/4 - ''') - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(''' - # uuid - 12345678-1234-5678-1234-567812345678 - ''') - prints:: - # any int or real number, returned as the appropriate type - 100 - [100] - - -100 - [-100] - - +100 - [100] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # any int or real number, returned as float - 100 - [100.0] - - -100 - [-100.0] - - +100 - [100.0] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # hex numbers - 100 - [256] - - FF - [255] - - # fractions - 1/2 - [0.5] - - -3/4 - [-0.75] - - # mixed fractions - 1 - [1] - - 1/2 - [0.5] - - -3/4 - [-0.75] - - 1-3/4 - [1.75] - - # uuid - 12345678-1234-5678-1234-567812345678 - [UUID('12345678-1234-5678-1234-567812345678')] - """ - - convertToInteger = tokenMap(int) - """ - Parse action for converting parsed integers to Python int - """ - - convertToFloat = tokenMap(float) - """ - Parse action for converting parsed numbers to Python float - """ - - integer = Word(nums).setName("integer").setParseAction(convertToInteger) - """expression that parses an unsigned integer, returns an int""" - - hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16)) - """expression that 
parses a hexadecimal integer, returns an int""" - - signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) - """expression that parses an integer with optional leading sign, returns an int""" - - fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction") - """fractional expression of an integer divided by an integer, returns a float""" - fraction.addParseAction(lambda t: t[0]/t[-1]) - - mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") - """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" - mixed_integer.addParseAction(sum) - - real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat) - """expression that parses a floating point number and returns a float""" - - sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) - """expression that parses a floating point number with optional scientific notation and returns a float""" - - # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).streamline() - """any numeric expression, returns the corresponding Python type""" - - fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) - """any int or real number, returned as float""" - - identifier = Word(alphas+'_', alphanums+'_').setName("identifier") - """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - - ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") - "IPv4 address (C{0.0.0.0 - 255.255.255.255})" - - _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") - _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") - _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") - _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) - _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") - ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") - "IPv6 address (long, short, or mixed form)" - - mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") - "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" - - @staticmethod - def convertToDate(fmt="%Y-%m-%d"): - """ - Helper to create a parse action for converting parsed date string to Python datetime.date - - Params - - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) - - Example:: - date_expr = pyparsing_common.iso8601_date.copy() - date_expr.setParseAction(pyparsing_common.convertToDate()) - print(date_expr.parseString("1999-12-31")) - prints:: - [datetime.date(1999, 12, 31)] - """ - def cvt_fn(s,l,t): - try: - return datetime.strptime(t[0], fmt).date() - except ValueError as ve: - raise ParseException(s, l, str(ve)) - return cvt_fn - - @staticmethod - def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): - """ - Helper to create a parse action for converting parsed datetime string to Python datetime.datetime - - Params - - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) - - Example:: - dt_expr = pyparsing_common.iso8601_datetime.copy() - dt_expr.setParseAction(pyparsing_common.convertToDatetime()) - print(dt_expr.parseString("1999-12-31T23:59:59.999")) - prints:: - [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] - """ - def cvt_fn(s,l,t): - try: - return datetime.strptime(t[0], fmt) - except ValueError as ve: - raise ParseException(s, l, str(ve)) - return cvt_fn - - iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date") - "ISO8601 date (C{yyyy-mm-dd})" - - iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") - "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}" - - uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") - "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})" - - _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() - @staticmethod - def stripHTMLTags(s, l, tokens): - """ - Parse action to remove HTML tags from web page HTML source - - Example:: - # strip HTML links from normal text - text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>' - td,td_end = makeHTMLTags("TD") - table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end - - print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' - """ - return pyparsing_common._html_stripper.transformString(tokens[0]) - - _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') - + Optional( White(" \t") ) ) ).streamline().setName("commaItem") - comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list") - """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" - - upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) - """Parse action to convert tokens to upper case.""" - - downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) - """Parse action to convert tokens to lower case.""" - - -if __name__ == "__main__": - - selectToken = CaselessLiteral("select") - fromToken = CaselessLiteral("from") - - ident = Word(alphas, alphanums + "_$") - - columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) - columnNameList = Group(delimitedList(columnName)).setName("columns") - columnSpec 
= ('*' | columnNameList) - - tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) - tableNameList = Group(delimitedList(tableName)).setName("tables") - - simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") - - # demo runTests method, including embedded comments in test string - simpleSQL.runTests(""" - # '*' as column list and dotted table name - select * from SYS.XYZZY - - # caseless match on "SELECT", and casts back to "select" - SELECT * from XYZZY, ABC - - # list of column names, and mixed case SELECT keyword - Select AA,BB,CC from Sys.dual - - # multiple tables - Select A, B, C from Sys.dual, Table2 - - # invalid SELECT keyword - should fail - Xelect A, B, C from Sys.dual - - # incomplete command - should fail - Select - - # invalid column name - should fail - Select ^^^ frox Sys.dual - - """) - - pyparsing_common.number.runTests(""" - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - """) - - # any int or real number, returned as float - pyparsing_common.fnumber.runTests(""" - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - """) - - pyparsing_common.hex_integer.runTests(""" - 100 - FF - """) - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(""" - 12345678-1234-5678-1234-567812345678 - """) diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/six.py b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/six.py deleted file mode 100644 index 190c023..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/_vendor/six.py +++ /dev/null @@ -1,868 +0,0 @@ -"""Utilities for writing code that runs on Python 2 and 3""" - -# Copyright (c) 2010-2015 Benjamin Peterson -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -from __future__ import absolute_import - -import functools -import itertools -import operator -import sys -import types - -__author__ = "Benjamin Peterson <benjamin@python.org>" -__version__ = "1.10.0" - - -# Useful for very coarse version differentiation. 
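# (Sketch, assuming six is importable: downstream code branches on the
# flags defined next instead of repeating sys.version_info checks, e.g.
#
#     import six
#     text_type = unicode if six.PY2 else str  # the same choice six makes below
#
# on Python 3 the `unicode` arm is never evaluated, so the Python-2-only
# name stays safe.)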
-PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 -PY34 = sys.version_info[0:2] >= (3, 4) - -if PY3: - string_types = str, - integer_types = int, - class_types = type, - text_type = str - binary_type = bytes - - MAXSIZE = sys.maxsize -else: - string_types = basestring, - integer_types = (int, long) - class_types = (type, types.ClassType) - text_type = unicode - binary_type = str - - if sys.platform.startswith("java"): - # Jython always uses 32 bits. - MAXSIZE = int((1 << 31) - 1) - else: - # It's possible to have sizeof(long) != sizeof(Py_ssize_t). - class X(object): - - def __len__(self): - return 1 << 31 - try: - len(X()) - except OverflowError: - # 32-bit - MAXSIZE = int((1 << 31) - 1) - else: - # 64-bit - MAXSIZE = int((1 << 63) - 1) - del X - - -def _add_doc(func, doc): - """Add documentation to a function.""" - func.__doc__ = doc - - -def _import_module(name): - """Import module, returning the module after the last dot.""" - __import__(name) - return sys.modules[name] - - -class _LazyDescr(object): - - def __init__(self, name): - self.name = name - - def __get__(self, obj, tp): - result = self._resolve() - setattr(obj, self.name, result) # Invokes __set__. - try: - # This is a bit ugly, but it avoids running this again by - # removing this descriptor. - delattr(obj.__class__, self.name) - except AttributeError: - pass - return result - - -class MovedModule(_LazyDescr): - - def __init__(self, name, old, new=None): - super(MovedModule, self).__init__(name) - if PY3: - if new is None: - new = name - self.mod = new - else: - self.mod = old - - def _resolve(self): - return _import_module(self.mod) - - def __getattr__(self, attr): - _module = self._resolve() - value = getattr(_module, attr) - setattr(self, attr, value) - return value - - -class _LazyModule(types.ModuleType): - - def __init__(self, name): - super(_LazyModule, self).__init__(name) - self.__doc__ = self.__class__.__doc__ - - def __dir__(self): - attrs = ["__doc__", "__name__"] - attrs += [attr.name for attr in self._moved_attributes] - return attrs - - # Subclasses should override this - _moved_attributes = [] - - -class MovedAttribute(_LazyDescr): - - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): - super(MovedAttribute, self).__init__(name) - if PY3: - if new_mod is None: - new_mod = name - self.mod = new_mod - if new_attr is None: - if old_attr is None: - new_attr = name - else: - new_attr = old_attr - self.attr = new_attr - else: - self.mod = old_mod - if old_attr is None: - old_attr = name - self.attr = old_attr - - def _resolve(self): - module = _import_module(self.mod) - return getattr(module, self.attr) - - -class _SixMetaPathImporter(object): - - """ - A meta path importer to import six.moves and its submodules. - - This class implements a PEP302 finder and loader. It should be compatible - with Python 2.5 and all existing versions of Python3 - """ - - def __init__(self, six_module_name): - self.name = six_module_name - self.known_modules = {} - - def _add_module(self, mod, *fullnames): - for fullname in fullnames: - self.known_modules[self.name + "." + fullname] = mod - - def _get_module(self, fullname): - return self.known_modules[self.name + "." 
+ fullname] - - def find_module(self, fullname, path=None): - if fullname in self.known_modules: - return self - return None - - def __get_module(self, fullname): - try: - return self.known_modules[fullname] - except KeyError: - raise ImportError("This loader does not know module " + fullname) - - def load_module(self, fullname): - try: - # in case of a reload - return sys.modules[fullname] - except KeyError: - pass - mod = self.__get_module(fullname) - if isinstance(mod, MovedModule): - mod = mod._resolve() - else: - mod.__loader__ = self - sys.modules[fullname] = mod - return mod - - def is_package(self, fullname): - """ - Return true, if the named module is a package. - - We need this method to get correct spec objects with - Python 3.4 (see PEP451) - """ - return hasattr(self.__get_module(fullname), "__path__") - - def get_code(self, fullname): - """Return None - - Required, if is_package is implemented""" - self.__get_module(fullname) # eventually raises ImportError - return None - get_source = get_code # same as get_code - -_importer = _SixMetaPathImporter(__name__) - - -class _MovedItems(_LazyModule): - - """Lazy loading of moved objects""" - __path__ = [] # mark as package - - -_moved_attributes = [ - MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), - MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), - MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), - MovedAttribute("intern", "__builtin__", "sys"), - MovedAttribute("map", "itertools", "builtins", "imap", "map"), - MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), - MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), - MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), - MovedAttribute("reduce", "__builtin__", "functools"), - MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), - MovedAttribute("StringIO", "StringIO", "io"), - MovedAttribute("UserDict", "UserDict", "collections"), - MovedAttribute("UserList", "UserList", "collections"), - MovedAttribute("UserString", "UserString", "collections"), - MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), - MovedModule("builtins", "__builtin__"), - MovedModule("configparser", "ConfigParser"), - MovedModule("copyreg", "copy_reg"), - MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), - MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), - MovedModule("http_cookies", "Cookie", "http.cookies"), - MovedModule("html_entities", "htmlentitydefs", "html.entities"), - MovedModule("html_parser", "HTMLParser", "html.parser"), - MovedModule("http_client", "httplib", "http.client"), - MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), - MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), - MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), - MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), - MovedModule("SimpleHTTPServer", "SimpleHTTPServer", 
"http.server"), - MovedModule("cPickle", "cPickle", "pickle"), - MovedModule("queue", "Queue"), - MovedModule("reprlib", "repr"), - MovedModule("socketserver", "SocketServer"), - MovedModule("_thread", "thread", "_thread"), - MovedModule("tkinter", "Tkinter"), - MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), - MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), - MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), - MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), - MovedModule("tkinter_tix", "Tix", "tkinter.tix"), - MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), - MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), - MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", "tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), - MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), - MovedModule("tkinter_font", "tkFont", "tkinter.font"), - MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), - MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), - MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), - MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), - MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), - MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), - MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), -] -# Add windows specific modules. -if sys.platform == "win32": - _moved_attributes += [ - MovedModule("winreg", "_winreg"), - ] - -for attr in _moved_attributes: - setattr(_MovedItems, attr.name, attr) - if isinstance(attr, MovedModule): - _importer._add_module(attr, "moves." 
+ attr.name) -del attr - -_MovedItems._moved_attributes = _moved_attributes - -moves = _MovedItems(__name__ + ".moves") -_importer._add_module(moves, "moves") - - -class Module_six_moves_urllib_parse(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_parse""" - - -_urllib_parse_moved_attributes = [ - MovedAttribute("ParseResult", "urlparse", "urllib.parse"), - MovedAttribute("SplitResult", "urlparse", "urllib.parse"), - MovedAttribute("parse_qs", "urlparse", "urllib.parse"), - MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), - MovedAttribute("urldefrag", "urlparse", "urllib.parse"), - MovedAttribute("urljoin", "urlparse", "urllib.parse"), - MovedAttribute("urlparse", "urlparse", "urllib.parse"), - MovedAttribute("urlsplit", "urlparse", "urllib.parse"), - MovedAttribute("urlunparse", "urlparse", "urllib.parse"), - MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), - MovedAttribute("quote", "urllib", "urllib.parse"), - MovedAttribute("quote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote", "urllib", "urllib.parse"), - MovedAttribute("unquote_plus", "urllib", "urllib.parse"), - MovedAttribute("urlencode", "urllib", "urllib.parse"), - MovedAttribute("splitquery", "urllib", "urllib.parse"), - MovedAttribute("splittag", "urllib", "urllib.parse"), - MovedAttribute("splituser", "urllib", "urllib.parse"), - MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), - MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), - MovedAttribute("uses_params", "urlparse", "urllib.parse"), - MovedAttribute("uses_query", "urlparse", "urllib.parse"), - MovedAttribute("uses_relative", "urlparse", "urllib.parse"), -] -for attr in _urllib_parse_moved_attributes: - setattr(Module_six_moves_urllib_parse, attr.name, attr) -del attr - -Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes - -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - "moves.urllib_parse", "moves.urllib.parse") - - -class Module_six_moves_urllib_error(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_error""" - - -_urllib_error_moved_attributes = [ - MovedAttribute("URLError", "urllib2", "urllib.error"), - MovedAttribute("HTTPError", "urllib2", "urllib.error"), - MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), -] -for attr in _urllib_error_moved_attributes: - setattr(Module_six_moves_urllib_error, attr.name, attr) -del attr - -Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes - -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") - - -class Module_six_moves_urllib_request(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_request""" - - -_urllib_request_moved_attributes = [ - MovedAttribute("urlopen", "urllib2", "urllib.request"), - MovedAttribute("install_opener", "urllib2", "urllib.request"), - MovedAttribute("build_opener", "urllib2", "urllib.request"), - MovedAttribute("pathname2url", "urllib", "urllib.request"), - MovedAttribute("url2pathname", "urllib", "urllib.request"), - MovedAttribute("getproxies", "urllib", "urllib.request"), - MovedAttribute("Request", "urllib2", "urllib.request"), - MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), - MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPCookieProcessor", "urllib2", 
"urllib.request"), - MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), - MovedAttribute("BaseHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), - MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), - MovedAttribute("FileHandler", "urllib2", "urllib.request"), - MovedAttribute("FTPHandler", "urllib2", "urllib.request"), - MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), - MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), - MovedAttribute("urlretrieve", "urllib", "urllib.request"), - MovedAttribute("urlcleanup", "urllib", "urllib.request"), - MovedAttribute("URLopener", "urllib", "urllib.request"), - MovedAttribute("FancyURLopener", "urllib", "urllib.request"), - MovedAttribute("proxy_bypass", "urllib", "urllib.request"), -] -for attr in _urllib_request_moved_attributes: - setattr(Module_six_moves_urllib_request, attr.name, attr) -del attr - -Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes - -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") - - -class Module_six_moves_urllib_response(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_response""" - - -_urllib_response_moved_attributes = [ - MovedAttribute("addbase", "urllib", "urllib.response"), - MovedAttribute("addclosehook", "urllib", "urllib.response"), - MovedAttribute("addinfo", "urllib", "urllib.response"), - MovedAttribute("addinfourl", "urllib", "urllib.response"), -] -for attr in _urllib_response_moved_attributes: - setattr(Module_six_moves_urllib_response, attr.name, attr) -del attr - -Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes - -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") - - -class Module_six_moves_urllib_robotparser(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_robotparser""" - - -_urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), -] -for attr in _urllib_robotparser_moved_attributes: - setattr(Module_six_moves_urllib_robotparser, attr.name, attr) -del attr - -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes - -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") - - -class Module_six_moves_urllib(types.ModuleType): - - """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" - __path__ = [] # mark as package - parse = _importer._get_module("moves.urllib_parse") - error = _importer._get_module("moves.urllib_error") - request = 
_importer._get_module("moves.urllib_request") - response = _importer._get_module("moves.urllib_response") - robotparser = _importer._get_module("moves.urllib_robotparser") - - def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] - -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") - - -def add_move(move): - """Add an item to six.moves.""" - setattr(_MovedItems, move.name, move) - - -def remove_move(name): - """Remove item from six.moves.""" - try: - delattr(_MovedItems, name) - except AttributeError: - try: - del moves.__dict__[name] - except KeyError: - raise AttributeError("no such move, %r" % (name,)) - - -if PY3: - _meth_func = "__func__" - _meth_self = "__self__" - - _func_closure = "__closure__" - _func_code = "__code__" - _func_defaults = "__defaults__" - _func_globals = "__globals__" -else: - _meth_func = "im_func" - _meth_self = "im_self" - - _func_closure = "func_closure" - _func_code = "func_code" - _func_defaults = "func_defaults" - _func_globals = "func_globals" - - -try: - advance_iterator = next -except NameError: - def advance_iterator(it): - return it.next() -next = advance_iterator - - -try: - callable = callable -except NameError: - def callable(obj): - return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - - -if PY3: - def get_unbound_function(unbound): - return unbound - - create_bound_method = types.MethodType - - def create_unbound_method(func, cls): - return func - - Iterator = object -else: - def get_unbound_function(unbound): - return unbound.im_func - - def create_bound_method(func, obj): - return types.MethodType(func, obj, obj.__class__) - - def create_unbound_method(func, cls): - return types.MethodType(func, None, cls) - - class Iterator(object): - - def next(self): - return type(self).__next__(self) - - callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") - - -get_method_function = operator.attrgetter(_meth_func) -get_method_self = operator.attrgetter(_meth_self) -get_function_closure = operator.attrgetter(_func_closure) -get_function_code = operator.attrgetter(_func_code) -get_function_defaults = operator.attrgetter(_func_defaults) -get_function_globals = operator.attrgetter(_func_globals) - - -if PY3: - def iterkeys(d, **kw): - return iter(d.keys(**kw)) - - def itervalues(d, **kw): - return iter(d.values(**kw)) - - def iteritems(d, **kw): - return iter(d.items(**kw)) - - def iterlists(d, **kw): - return iter(d.lists(**kw)) - - viewkeys = operator.methodcaller("keys") - - viewvalues = operator.methodcaller("values") - - viewitems = operator.methodcaller("items") -else: - def iterkeys(d, **kw): - return d.iterkeys(**kw) - - def itervalues(d, **kw): - return d.itervalues(**kw) - - def iteritems(d, **kw): - return d.iteritems(**kw) - - def iterlists(d, **kw): - return d.iterlists(**kw) - - viewkeys = operator.methodcaller("viewkeys") - - viewvalues = operator.methodcaller("viewvalues") - - viewitems = operator.methodcaller("viewitems") - -_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") -_add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") - - -if PY3: - def b(s): - return s.encode("latin-1") - - def u(s): - return s - unichr = chr - import struct - int2byte = struct.Struct(">B").pack 
- del struct - byte2int = operator.itemgetter(0) - indexbytes = operator.getitem - iterbytes = iter - import io - StringIO = io.StringIO - BytesIO = io.BytesIO - _assertCountEqual = "assertCountEqual" - if sys.version_info[1] <= 1: - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" - else: - _assertRaisesRegex = "assertRaisesRegex" - _assertRegex = "assertRegex" -else: - def b(s): - return s - # Workaround for standalone backslash - - def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") - unichr = unichr - int2byte = chr - - def byte2int(bs): - return ord(bs[0]) - - def indexbytes(buf, i): - return ord(buf[i]) - iterbytes = functools.partial(itertools.imap, ord) - import StringIO - StringIO = BytesIO = StringIO.StringIO - _assertCountEqual = "assertItemsEqual" - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" -_add_doc(b, """Byte literal""") -_add_doc(u, """Text literal""") - - -def assertCountEqual(self, *args, **kwargs): - return getattr(self, _assertCountEqual)(*args, **kwargs) - - -def assertRaisesRegex(self, *args, **kwargs): - return getattr(self, _assertRaisesRegex)(*args, **kwargs) - - -def assertRegex(self, *args, **kwargs): - return getattr(self, _assertRegex)(*args, **kwargs) - - -if PY3: - exec_ = getattr(moves.builtins, "exec") - - def reraise(tp, value, tb=None): - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - -else: - def exec_(_code_, _globs_=None, _locs_=None): - """Execute code in a namespace.""" - if _globs_ is None: - frame = sys._getframe(1) - _globs_ = frame.f_globals - if _locs_ is None: - _locs_ = frame.f_locals - del frame - elif _locs_ is None: - _locs_ = _globs_ - exec("""exec _code_ in _globs_, _locs_""") - - exec_("""def reraise(tp, value, tb=None): - raise tp, value, tb -""") - - -if sys.version_info[:2] == (3, 2): - exec_("""def raise_from(value, from_value): - if from_value is None: - raise value - raise value from from_value -""") -elif sys.version_info[:2] > (3, 2): - exec_("""def raise_from(value, from_value): - raise value from from_value -""") -else: - def raise_from(value, from_value): - raise value - - -print_ = getattr(moves.builtins, "print", None) -if print_ is None: - def print_(*args, **kwargs): - """The new-style print function for Python 2.4 and 2.5.""" - fp = kwargs.pop("file", sys.stdout) - if fp is None: - return - - def write(data): - if not isinstance(data, basestring): - data = str(data) - # If the file has an encoding, encode unicode with it. 
- if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): - errors = getattr(fp, "errors", None) - if errors is None: - errors = "strict" - data = data.encode(fp.encoding, errors) - fp.write(data) - want_unicode = False - sep = kwargs.pop("sep", None) - if sep is not None: - if isinstance(sep, unicode): - want_unicode = True - elif not isinstance(sep, str): - raise TypeError("sep must be None or a string") - end = kwargs.pop("end", None) - if end is not None: - if isinstance(end, unicode): - want_unicode = True - elif not isinstance(end, str): - raise TypeError("end must be None or a string") - if kwargs: - raise TypeError("invalid keyword arguments to print()") - if not want_unicode: - for arg in args: - if isinstance(arg, unicode): - want_unicode = True - break - if want_unicode: - newline = unicode("\n") - space = unicode(" ") - else: - newline = "\n" - space = " " - if sep is None: - sep = space - if end is None: - end = newline - for i, arg in enumerate(args): - if i: - write(sep) - write(arg) - write(end) -if sys.version_info[:2] < (3, 3): - _print = print_ - - def print_(*args, **kwargs): - fp = kwargs.get("file", sys.stdout) - flush = kwargs.pop("flush", False) - _print(*args, **kwargs) - if flush and fp is not None: - fp.flush() - -_add_doc(reraise, """Reraise an exception.""") - -if sys.version_info[0:2] < (3, 4): - def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - def wrapper(f): - f = functools.wraps(wrapped, assigned, updated)(f) - f.__wrapped__ = wrapped - return f - return wrapper -else: - wraps = functools.wraps - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(meta): - - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -def add_metaclass(metaclass): - """Class decorator for creating a class with a metaclass.""" - def wrapper(cls): - orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') - if slots is not None: - if isinstance(slots, str): - slots = [slots] - for slots_var in slots: - orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) - return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper - - -def python_2_unicode_compatible(klass): - """ - A decorator that defines __unicode__ and __str__ methods under Python 2. - Under Python 3 it does nothing. - - To support Python 2 and 3 with a single code base, define a __str__ method - returning text and apply this decorator to the class. - """ - if PY2: - if '__str__' not in klass.__dict__: - raise ValueError("@python_2_unicode_compatible cannot be applied " - "to %s because it doesn't define __str__()." % - klass.__name__) - klass.__unicode__ = klass.__str__ - klass.__str__ = lambda self: self.__unicode__().encode('utf-8') - return klass - - -# Complete the moves implementation. -# This code is at the end of this module to speed up module loading. -# Turn this module into a package. 
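# A usage sketch for the with_metaclass/add_metaclass helpers defined above
# (assuming six is importable; Meta, Base, A and B are illustrative names):
#
#     import six
#
#     class Meta(type):
#         pass
#
#     class Base(object):
#         pass
#
#     @six.add_metaclass(Meta)
#     class A(Base):
#         pass
#
#     class B(six.with_metaclass(Meta, Base)):
#         pass
#
#     assert type(A) is Meta and type(B) is Meta  # same effect on 2 and 3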
-__path__ = [] # required for PEP 302 and PEP 451 -__package__ = __name__ # see PEP 366 @ReservedAssignment -if globals().get("__spec__") is not None: - __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable -# Remove other six meta path importers, since they cause problems. This can -# happen if six is removed from sys.modules and then reloaded. (Setuptools does -# this for some reason.) -if sys.meta_path: - for i, importer in enumerate(sys.meta_path): - # Here's some real nastiness: Another "instance" of the six module might - # be floating around. Therefore, we can't use isinstance() to check for - # the six meta path importer, since the other six instance will have - # inserted an importer with different class. - if (type(importer).__name__ == "_SixMetaPathImporter" and - importer.name == __name__): - del sys.meta_path[i] - break - del i, importer -# Finally, add the importer to the meta path import hook. -sys.meta_path.append(_importer) diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/extern/__init__.py b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/extern/__init__.py deleted file mode 100644 index b4156fe..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/extern/__init__.py +++ /dev/null @@ -1,73 +0,0 @@ -import sys - - -class VendorImporter: - """ - A PEP 302 meta path importer for finding optionally-vendored - or otherwise naturally-installed packages from root_name. - """ - - def __init__(self, root_name, vendored_names=(), vendor_pkg=None): - self.root_name = root_name - self.vendored_names = set(vendored_names) - self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor') - - @property - def search_path(self): - """ - Search first the vendor package then as a natural package. - """ - yield self.vendor_pkg + '.' - yield '' - - def find_module(self, fullname, path=None): - """ - Return self when fullname starts with root_name and the - target module is one vendored through this importer. - """ - root, base, target = fullname.partition(self.root_name + '.') - if root: - return - if not any(map(target.startswith, self.vendored_names)): - return - return self - - def load_module(self, fullname): - """ - Iterate over the search path to locate and load fullname. - """ - root, base, target = fullname.partition(self.root_name + '.') - for prefix in self.search_path: - try: - extant = prefix + target - __import__(extant) - mod = sys.modules[extant] - sys.modules[fullname] = mod - # mysterious hack: - # Remove the reference to the extant package/module - # on later Python versions to cause relative imports - # in the vendor package to resolve the same modules - # as those going through this importer. - if sys.version_info > (3, 3): - del sys.modules[extant] - return mod - except ImportError: - pass - else: - raise ImportError( - "The '{target}' package is required; " - "normally this is bundled with this package so if you get " - "this warning, consult the packager of your " - "distribution.".format(**locals()) - ) - - def install(self): - """ - Install this importer into sys.meta_path if not already present. 
- """ - if self not in sys.meta_path: - sys.meta_path.append(self) - - -names = 'packaging', 'pyparsing', 'six', 'appdirs' -VendorImporter(__name__, names).install() diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index b837eba..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/extern/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/py31compat.py b/classifier/myenv/lib/python3.6/site-packages/pkg_resources/py31compat.py deleted file mode 100644 index 331a51b..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/pkg_resources/py31compat.py +++ /dev/null @@ -1,22 +0,0 @@ -import os -import errno -import sys - - -def _makedirs_31(path, exist_ok=False): - try: - os.makedirs(path) - except OSError as exc: - if not exist_ok or exc.errno != errno.EEXIST: - raise - - -# rely on compatibility behavior until mode considerations -# and exists_ok considerations are disentangled. -# See https://github.com/pypa/setuptools/pull/1083#issuecomment-315168663 -needs_makedirs = ( - sys.version_info < (3, 2, 5) or - (3, 3) <= sys.version_info < (3, 3, 6) or - (3, 4) <= sys.version_info < (3, 4, 1) -) -makedirs = _makedirs_31 if needs_makedirs else os.makedirs diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/DESCRIPTION.rst b/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/DESCRIPTION.rst deleted file mode 100644 index ba3a46b..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/DESCRIPTION.rst +++ /dev/null @@ -1,36 +0,0 @@ -.. image:: https://img.shields.io/pypi/v/setuptools.svg - :target: https://pypi.org/project/setuptools - -.. image:: https://readthedocs.org/projects/setuptools/badge/?version=latest - :target: https://setuptools.readthedocs.io - -.. image:: https://img.shields.io/travis/pypa/setuptools/master.svg?label=Linux%20build%20%40%20Travis%20CI - :target: https://travis-ci.org/pypa/setuptools - -.. image:: https://img.shields.io/appveyor/ci/jaraco/setuptools/master.svg?label=Windows%20build%20%40%20Appveyor - :target: https://ci.appveyor.com/project/jaraco/setuptools/branch/master - -.. image:: https://img.shields.io/pypi/pyversions/setuptools.svg - -See the `Installation Instructions -<https://packaging.python.org/installing/>`_ in the Python Packaging -User's Guide for instructions on installing, upgrading, and uninstalling -Setuptools. - -The project is `maintained at GitHub <https://github.com/pypa/setuptools>`_. - -Questions and comments should be directed to the `distutils-sig -mailing list <http://mail.python.org/pipermail/distutils-sig/>`_. -Bug reports and especially tested patches may be -submitted directly to the `bug tracker -<https://github.com/pypa/setuptools/issues>`_. - - -Code of Conduct ---------------- - -Everyone interacting in the setuptools project's codebases, issue trackers, -chat rooms, and mailing lists is expected to follow the -`PyPA Code of Conduct <https://www.pypa.io/en/latest/code-of-conduct/>`_. 
- - diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/INSTALLER b/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/INSTALLER deleted file mode 100644 index a1b589e..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/INSTALLER +++ /dev/null @@ -1 +0,0 @@ -pip diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/METADATA b/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/METADATA deleted file mode 100644 index d80cdcf..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/METADATA +++ /dev/null @@ -1,67 +0,0 @@ -Metadata-Version: 2.0 -Name: setuptools -Version: 39.0.1 -Summary: Easily download, build, install, upgrade, and uninstall Python packages -Home-page: https://github.com/pypa/setuptools -Author: Python Packaging Authority -Author-email: distutils-sig@python.org -License: UNKNOWN -Project-URL: Documentation, https://setuptools.readthedocs.io/ -Keywords: CPAN PyPI distutils eggs package management -Platform: UNKNOWN -Classifier: Development Status :: 5 - Production/Stable -Classifier: Intended Audience :: Developers -Classifier: License :: OSI Approved :: MIT License -Classifier: Operating System :: OS Independent -Classifier: Programming Language :: Python :: 2 -Classifier: Programming Language :: Python :: 2.7 -Classifier: Programming Language :: Python :: 3 -Classifier: Programming Language :: Python :: 3.3 -Classifier: Programming Language :: Python :: 3.4 -Classifier: Programming Language :: Python :: 3.5 -Classifier: Programming Language :: Python :: 3.6 -Classifier: Topic :: Software Development :: Libraries :: Python Modules -Classifier: Topic :: System :: Archiving :: Packaging -Classifier: Topic :: System :: Systems Administration -Classifier: Topic :: Utilities -Requires-Python: >=2.7,!=3.0.*,!=3.1.*,!=3.2.* -Description-Content-Type: text/x-rst; charset=UTF-8 -Provides-Extra: certs -Provides-Extra: ssl - -.. image:: https://img.shields.io/pypi/v/setuptools.svg - :target: https://pypi.org/project/setuptools - -.. image:: https://readthedocs.org/projects/setuptools/badge/?version=latest - :target: https://setuptools.readthedocs.io - -.. image:: https://img.shields.io/travis/pypa/setuptools/master.svg?label=Linux%20build%20%40%20Travis%20CI - :target: https://travis-ci.org/pypa/setuptools - -.. image:: https://img.shields.io/appveyor/ci/jaraco/setuptools/master.svg?label=Windows%20build%20%40%20Appveyor - :target: https://ci.appveyor.com/project/jaraco/setuptools/branch/master - -.. image:: https://img.shields.io/pypi/pyversions/setuptools.svg - -See the `Installation Instructions -<https://packaging.python.org/installing/>`_ in the Python Packaging -User's Guide for instructions on installing, upgrading, and uninstalling -Setuptools. - -The project is `maintained at GitHub <https://github.com/pypa/setuptools>`_. - -Questions and comments should be directed to the `distutils-sig -mailing list <http://mail.python.org/pipermail/distutils-sig/>`_. -Bug reports and especially tested patches may be -submitted directly to the `bug tracker -<https://github.com/pypa/setuptools/issues>`_. - - -Code of Conduct ---------------- - -Everyone interacting in the setuptools project's codebases, issue trackers, -chat rooms, and mailing lists is expected to follow the -`PyPA Code of Conduct <https://www.pypa.io/en/latest/code-of-conduct/>`_. 
- - diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/RECORD b/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/RECORD deleted file mode 100644 index 3cb2d47..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/RECORD +++ /dev/null @@ -1,155 +0,0 @@ -easy_install.py,sha256=MDC9vt5AxDsXX5qcKlBz2TnW6Tpuv_AobnfhCJ9X3PM,126 -setuptools/__init__.py,sha256=WWIdCbFJnZ9fZoaWDN_x1vDA_Rkm-Sc15iKvPtIYKFs,5700 -setuptools/archive_util.py,sha256=kw8Ib_lKjCcnPKNbS7h8HztRVK0d5RacU3r_KRdVnmM,6592 -setuptools/build_meta.py,sha256=FllaKTr1vSJyiUeRjVJEZmeEaRzhYueNlimtcwaJba8,5671 -setuptools/cli-32.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536 -setuptools/cli-64.exe,sha256=KLABu5pyrnokJCv6skjXZ6GsXeyYHGcqOUT3oHI3Xpo,74752 -setuptools/cli.exe,sha256=dfEuovMNnA2HLa3jRfMPVi5tk4R7alCbpTvuxtCyw0Y,65536 -setuptools/config.py,sha256=tVYBM3w1U_uBRRTOZydflxyZ_IrTJT5odlZz3cbuhSw,16381 -setuptools/dep_util.py,sha256=fgixvC1R7sH3r13ktyf7N0FALoqEXL1cBarmNpSEoWg,935 -setuptools/depends.py,sha256=hC8QIDcM3VDpRXvRVA6OfL9AaQfxvhxHcN_w6sAyNq8,5837 -setuptools/dist.py,sha256=1GpYnnbv9Bk6uRIvwYS5TRVTSMf89tGM4PAla0WkUek,42522 -setuptools/extension.py,sha256=uc6nHI-MxwmNCNPbUiBnybSyqhpJqjbhvOQ-emdvt_E,1729 -setuptools/glibc.py,sha256=X64VvGPL2AbURKwYRsWJOXXGAYOiF_v2qixeTkAULuU,3146 -setuptools/glob.py,sha256=Y-fpv8wdHZzv9DPCaGACpMSBWJ6amq_1e0R_i8_el4w,5207 -setuptools/gui-32.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536 -setuptools/gui-64.exe,sha256=aYKMhX1IJLn4ULHgWX0sE0yREUt6B3TEHf_jOw6yNyE,75264 -setuptools/gui.exe,sha256=XBr0bHMA6Hpz2s9s9Bzjl-PwXfa9nH4ie0rFn4V2kWA,65536 -setuptools/launch.py,sha256=sd7ejwhBocCDx_wG9rIs0OaZ8HtmmFU8ZC6IR_S0Lvg,787 -setuptools/lib2to3_ex.py,sha256=t5e12hbR2pi9V4ezWDTB4JM-AISUnGOkmcnYHek3xjg,2013 -setuptools/monkey.py,sha256=zZGTH7p0xeXQKLmEwJTPIE4m5m7fJeHoAsxyv5M8e_E,5789 -setuptools/msvc.py,sha256=8EiV9ypb3EQJQssPcH1HZbdNsbRvqsFnJ7wPFEGwFIo,40877 -setuptools/namespaces.py,sha256=F0Nrbv8KCT2OrO7rwa03om4N4GZKAlnce-rr-cgDQa8,3199 -setuptools/package_index.py,sha256=NEsrNXnt_9gGP-nCCYzV-0gk15lXAGO7RghRxpfqLqE,40142 -setuptools/pep425tags.py,sha256=NuGMx1gGif7x6iYemh0LfgBr_FZF5GFORIbgmMdU8J4,10882 -setuptools/py27compat.py,sha256=3mwxRMDk5Q5O1rSXOERbQDXhFqwDJhhUitfMW_qpUCo,536 -setuptools/py31compat.py,sha256=XuU1HCsGE_3zGvBRIhYw2iB-IhCFK4-Pxw_jMiqdNVk,1192 -setuptools/py33compat.py,sha256=NKS84nl4LjLIoad6OQfgmygZn4mMvrok_b1N1tzebew,1182 -setuptools/py36compat.py,sha256=VUDWxmu5rt4QHlGTRtAFu6W5jvfL6WBjeDAzeoBy0OM,2891 -setuptools/sandbox.py,sha256=9UbwfEL5QY436oMI1LtFWohhoZ-UzwHvGyZjUH_qhkw,14276 -setuptools/script (dev).tmpl,sha256=f7MR17dTkzaqkCMSVseyOCMVrPVSMdmTQsaB8cZzfuI,201 -setuptools/script.tmpl,sha256=WGTt5piezO27c-Dbx6l5Q4T3Ff20A5z7872hv3aAhYY,138 -setuptools/site-patch.py,sha256=BVt6yIrDMXJoflA5J6DJIcsJUfW_XEeVhOzelTTFDP4,2307 -setuptools/ssl_support.py,sha256=YBDJsCZjSp62CWjxmSkke9kn9rhHHj25Cus6zhJRW3c,8492 -setuptools/unicode_utils.py,sha256=NOiZ_5hD72A6w-4wVj8awHFM3n51Kmw1Ic_vx15XFqw,996 -setuptools/version.py,sha256=og_cuZQb0QI6ukKZFfZWPlr1HgJBPPn2vO2m_bI9ZTE,144 -setuptools/wheel.py,sha256=yF9usxMvpwnymV-oOo5mfDiv3E8jrKkbDEItT7_kjBs,7230 -setuptools/windows_support.py,sha256=5GrfqSP2-dLGJoZTq2g6dCKkyQxxa2n5IQiXlJCoYEE,714 -setuptools/_vendor/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 -setuptools/_vendor/pyparsing.py,sha256=PifeLY3-WhIcBVzLtv0U4T_pwDtPruBhBCkg5vLqa28,229867 
-setuptools/_vendor/six.py,sha256=A6hdJZVjI3t_geebZ9BzUvwRrIXo0lfwzQlM2LcKyas,30098 -setuptools/_vendor/packaging/__about__.py,sha256=zkcCPTN_6TcLW0Nrlg0176-R1QQ_WVPTm8sz1R4-HjM,720 -setuptools/_vendor/packaging/__init__.py,sha256=_vNac5TrzwsrzbOFIbF-5cHqc_Y2aPT2D7zrIR06BOo,513 -setuptools/_vendor/packaging/_compat.py,sha256=Vi_A0rAQeHbU-a9X0tt1yQm9RqkgQbDSxzRw8WlU9kA,860 -setuptools/_vendor/packaging/_structures.py,sha256=RImECJ4c_wTlaTYYwZYLHEiebDMaAJmK1oPARhw1T5o,1416 -setuptools/_vendor/packaging/markers.py,sha256=Gvpk9EY20yKaMTiKgQZ8yFEEpodqVgVYtfekoic1Yts,8239 -setuptools/_vendor/packaging/requirements.py,sha256=t44M2HVWtr8phIz2OhnILzuGT3rTATaovctV1dpnVIg,4343 -setuptools/_vendor/packaging/specifiers.py,sha256=SAMRerzO3fK2IkFZCaZkuwZaL_EGqHNOz4pni4vhnN0,28025 -setuptools/_vendor/packaging/utils.py,sha256=3m6WvPm6NNxE8rkTGmn0r75B_GZSGg7ikafxHsBN1WA,421 -setuptools/_vendor/packaging/version.py,sha256=OwGnxYfr2ghNzYx59qWIBkrK3SnB6n-Zfd1XaLpnnM0,11556 -setuptools/command/__init__.py,sha256=NWzJ0A1BEengZpVeqUyWLNm2bk4P3F4iL5QUErHy7kA,594 -setuptools/command/alias.py,sha256=KjpE0sz_SDIHv3fpZcIQK-sCkJz-SrC6Gmug6b9Nkc8,2426 -setuptools/command/bdist_egg.py,sha256=RQ9h8BmSVpXKJQST3i_b_sm093Z-aCXbfMBEM2IrI-Q,18185 -setuptools/command/bdist_rpm.py,sha256=B7l0TnzCGb-0nLlm6rS00jWLkojASwVmdhW2w5Qz_Ak,1508 -setuptools/command/bdist_wininst.py,sha256=_6dz3lpB1tY200LxKPLM7qgwTCceOMgaWFF-jW2-pm0,637 -setuptools/command/build_clib.py,sha256=bQ9aBr-5ZSO-9fGsGsDLz0mnnFteHUZnftVLkhvHDq0,4484 -setuptools/command/build_ext.py,sha256=PCRAZ2xYnqyEof7EFNtpKYl0sZzT0qdKUNTH3sUdPqk,13173 -setuptools/command/build_py.py,sha256=yWyYaaS9F3o9JbIczn064A5g1C5_UiKRDxGaTqYbtLE,9596 -setuptools/command/develop.py,sha256=wKbOw2_qUvcDti2lZmtxbDmYb54yAAibExzXIvToz-A,8046 -setuptools/command/dist_info.py,sha256=5t6kOfrdgALT-P3ogss6PF9k-Leyesueycuk3dUyZnI,960 -setuptools/command/easy_install.py,sha256=vRnKfAJ2hgTyt4OOLSna4BFEySfj9-KD4gGIAzfC6i8,89413 -setuptools/command/egg_info.py,sha256=BFs9e2mpws2YAFYtxxekoJaFj5X9N7o5LxpQeMdbyY4,24808 -setuptools/command/install.py,sha256=a0EZpL_A866KEdhicTGbuyD_TYl1sykfzdrri-zazT4,4683 -setuptools/command/install_egg_info.py,sha256=4zq_Ad3jE-EffParuyDEnvxU6efB-Xhrzdr8aB6Ln_8,3195 -setuptools/command/install_lib.py,sha256=n2iLR8f1MlYeGHtV2oFxDpUiL-wyLaQgwSAFX-YIEv4,5012 -setuptools/command/install_scripts.py,sha256=UD0rEZ6861mTYhIdzcsqKnUl8PozocXWl9VBQ1VTWnc,2439 -setuptools/command/launcher manifest.xml,sha256=xlLbjWrB01tKC0-hlVkOKkiSPbzMml2eOPtJ_ucCnbE,628 -setuptools/command/py36compat.py,sha256=SzjZcOxF7zdFUT47Zv2n7AM3H8koDys_0OpS-n9gIfc,4986 -setuptools/command/register.py,sha256=bHlMm1qmBbSdahTOT8w6UhA-EgeQIz7p6cD-qOauaiI,270 -setuptools/command/rotate.py,sha256=co5C1EkI7P0GGT6Tqz-T2SIj2LBJTZXYELpmao6d4KQ,2164 -setuptools/command/saveopts.py,sha256=za7QCBcQimKKriWcoCcbhxPjUz30gSB74zuTL47xpP4,658 -setuptools/command/sdist.py,sha256=obDTe2BmWt2PlnFPZZh7e0LWvemEsbCCO9MzhrTZjm8,6711 -setuptools/command/setopt.py,sha256=NTWDyx-gjDF-txf4dO577s7LOzHVoKR0Mq33rFxaRr8,5085 -setuptools/command/test.py,sha256=MeBAcXUePGjPKqjz4zvTrHatLvNsjlPFcagt3XnFYdk,9214 -setuptools/command/upload.py,sha256=i1gfItZ3nQOn5FKXb8tLC2Kd7eKC8lWO4bdE6NqGpE4,1172 -setuptools/command/upload_docs.py,sha256=oXiGplM_cUKLwE4CWWw98RzCufAu8tBhMC97GegFcms,7311 -setuptools/extern/__init__.py,sha256=2eKMsBMwsZqolIcYBtLZU3t96s6xSTP4PTaNfM5P-I0,2499 -setuptools-39.0.1.dist-info/DESCRIPTION.rst,sha256=It3a3GRjT5701mqhrpMcLyW_YS2Dokv-X8zWoTaMRe0,1422 
-setuptools-39.0.1.dist-info/METADATA,sha256=whsT1qR2TE8wd0dsBKl8ZwCU_sfHFOGaHLWsDuaMqkQ,2728 -setuptools-39.0.1.dist-info/RECORD,, -setuptools-39.0.1.dist-info/WHEEL,sha256=kdsN-5OJAZIiHN-iO4Rhl82KyS0bDWf4uBwMbkNafr8,110 -setuptools-39.0.1.dist-info/dependency_links.txt,sha256=HlkCFkoK5TbZ5EMLbLKYhLcY_E31kBWD8TqW2EgmatQ,239 -setuptools-39.0.1.dist-info/entry_points.txt,sha256=s4ibTr5_v_-uWueemgrdzLUIL_ageOMqsgCAKZDkY2E,2934 -setuptools-39.0.1.dist-info/metadata.json,sha256=kbzF0VTLzT0y_Hlf-90rhO8kH_DP02x9lA-6jkrIGsQ,4650 -setuptools-39.0.1.dist-info/top_level.txt,sha256=2HUXVVwA4Pff1xgTFr3GsTXXKaPaO6vlG6oNJ_4u4Tg,38 -setuptools-39.0.1.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 -../../../bin/easy_install,sha256=32ROmkQO86ph6guvTGWodFLEDH992DpAYvkZKMcMSR0,309 -../../../bin/easy_install-3.6,sha256=32ROmkQO86ph6guvTGWodFLEDH992DpAYvkZKMcMSR0,309 -setuptools-39.0.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 -setuptools/extern/__pycache__/__init__.cpython-36.pyc,, -setuptools/command/__pycache__/upload.cpython-36.pyc,, -setuptools/command/__pycache__/dist_info.cpython-36.pyc,, -setuptools/command/__pycache__/build_clib.cpython-36.pyc,, -setuptools/command/__pycache__/build_ext.cpython-36.pyc,, -setuptools/command/__pycache__/rotate.cpython-36.pyc,, -setuptools/command/__pycache__/saveopts.cpython-36.pyc,, -setuptools/command/__pycache__/__init__.cpython-36.pyc,, -setuptools/command/__pycache__/install_lib.cpython-36.pyc,, -setuptools/command/__pycache__/register.cpython-36.pyc,, -setuptools/command/__pycache__/bdist_rpm.cpython-36.pyc,, -setuptools/command/__pycache__/alias.cpython-36.pyc,, -setuptools/command/__pycache__/install_scripts.cpython-36.pyc,, -setuptools/command/__pycache__/sdist.cpython-36.pyc,, -setuptools/command/__pycache__/egg_info.cpython-36.pyc,, -setuptools/command/__pycache__/bdist_egg.cpython-36.pyc,, -setuptools/command/__pycache__/py36compat.cpython-36.pyc,, -setuptools/command/__pycache__/test.cpython-36.pyc,, -setuptools/command/__pycache__/develop.cpython-36.pyc,, -setuptools/command/__pycache__/install.cpython-36.pyc,, -setuptools/command/__pycache__/install_egg_info.cpython-36.pyc,, -setuptools/command/__pycache__/setopt.cpython-36.pyc,, -setuptools/command/__pycache__/upload_docs.cpython-36.pyc,, -setuptools/command/__pycache__/easy_install.cpython-36.pyc,, -setuptools/command/__pycache__/bdist_wininst.cpython-36.pyc,, -setuptools/command/__pycache__/build_py.cpython-36.pyc,, -setuptools/_vendor/packaging/__pycache__/version.cpython-36.pyc,, -setuptools/_vendor/packaging/__pycache__/markers.cpython-36.pyc,, -setuptools/_vendor/packaging/__pycache__/utils.cpython-36.pyc,, -setuptools/_vendor/packaging/__pycache__/__init__.cpython-36.pyc,, -setuptools/_vendor/packaging/__pycache__/_compat.cpython-36.pyc,, -setuptools/_vendor/packaging/__pycache__/_structures.cpython-36.pyc,, -setuptools/_vendor/packaging/__pycache__/requirements.cpython-36.pyc,, -setuptools/_vendor/packaging/__pycache__/__about__.cpython-36.pyc,, -setuptools/_vendor/packaging/__pycache__/specifiers.cpython-36.pyc,, -setuptools/_vendor/__pycache__/pyparsing.cpython-36.pyc,, -setuptools/_vendor/__pycache__/__init__.cpython-36.pyc,, -setuptools/_vendor/__pycache__/six.cpython-36.pyc,, -setuptools/__pycache__/ssl_support.cpython-36.pyc,, -setuptools/__pycache__/wheel.cpython-36.pyc,, -setuptools/__pycache__/namespaces.cpython-36.pyc,, -setuptools/__pycache__/unicode_utils.cpython-36.pyc,, -setuptools/__pycache__/version.cpython-36.pyc,, 
-setuptools/__pycache__/lib2to3_ex.cpython-36.pyc,, -setuptools/__pycache__/__init__.cpython-36.pyc,, -setuptools/__pycache__/launch.cpython-36.pyc,, -setuptools/__pycache__/py31compat.cpython-36.pyc,, -setuptools/__pycache__/package_index.cpython-36.pyc,, -setuptools/__pycache__/glob.cpython-36.pyc,, -setuptools/__pycache__/py33compat.cpython-36.pyc,, -setuptools/__pycache__/archive_util.cpython-36.pyc,, -setuptools/__pycache__/py36compat.cpython-36.pyc,, -setuptools/__pycache__/py27compat.cpython-36.pyc,, -setuptools/__pycache__/monkey.cpython-36.pyc,, -setuptools/__pycache__/dep_util.cpython-36.pyc,, -setuptools/__pycache__/site-patch.cpython-36.pyc,, -setuptools/__pycache__/build_meta.cpython-36.pyc,, -setuptools/__pycache__/msvc.cpython-36.pyc,, -setuptools/__pycache__/config.cpython-36.pyc,, -setuptools/__pycache__/glibc.cpython-36.pyc,, -setuptools/__pycache__/dist.cpython-36.pyc,, -setuptools/__pycache__/sandbox.cpython-36.pyc,, -setuptools/__pycache__/windows_support.cpython-36.pyc,, -setuptools/__pycache__/pep425tags.cpython-36.pyc,, -setuptools/__pycache__/depends.cpython-36.pyc,, -setuptools/__pycache__/extension.cpython-36.pyc,, -__pycache__/easy_install.cpython-36.pyc,, diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/WHEEL b/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/WHEEL deleted file mode 100644 index 7332a41..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/WHEEL +++ /dev/null @@ -1,6 +0,0 @@ -Wheel-Version: 1.0 -Generator: bdist_wheel (0.30.0) -Root-Is-Purelib: true -Tag: py2-none-any -Tag: py3-none-any - diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/dependency_links.txt b/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/dependency_links.txt deleted file mode 100644 index e87d021..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/dependency_links.txt +++ /dev/null @@ -1,2 +0,0 @@ -https://files.pythonhosted.org/packages/source/c/certifi/certifi-2016.9.26.tar.gz#md5=baa81e951a29958563689d868ef1064d -https://files.pythonhosted.org/packages/source/w/wincertstore/wincertstore-0.2.zip#md5=ae728f2f007185648d0c7a8679b361e2 diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/entry_points.txt b/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/entry_points.txt deleted file mode 100644 index b429e52..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/entry_points.txt +++ /dev/null @@ -1,64 +0,0 @@ -[console_scripts] -easy_install = setuptools.command.easy_install:main - -[distutils.commands] -alias = setuptools.command.alias:alias -bdist_egg = setuptools.command.bdist_egg:bdist_egg -bdist_rpm = setuptools.command.bdist_rpm:bdist_rpm -bdist_wininst = setuptools.command.bdist_wininst:bdist_wininst -build_clib = setuptools.command.build_clib:build_clib -build_ext = setuptools.command.build_ext:build_ext -build_py = setuptools.command.build_py:build_py -develop = setuptools.command.develop:develop -dist_info = setuptools.command.dist_info:dist_info -easy_install = setuptools.command.easy_install:easy_install -egg_info = setuptools.command.egg_info:egg_info -install = setuptools.command.install:install -install_egg_info = setuptools.command.install_egg_info:install_egg_info -install_lib = setuptools.command.install_lib:install_lib -install_scripts = 
setuptools.command.install_scripts:install_scripts -register = setuptools.command.register:register -rotate = setuptools.command.rotate:rotate -saveopts = setuptools.command.saveopts:saveopts -sdist = setuptools.command.sdist:sdist -setopt = setuptools.command.setopt:setopt -test = setuptools.command.test:test -upload = setuptools.command.upload:upload -upload_docs = setuptools.command.upload_docs:upload_docs - -[distutils.setup_keywords] -convert_2to3_doctests = setuptools.dist:assert_string_list -dependency_links = setuptools.dist:assert_string_list -eager_resources = setuptools.dist:assert_string_list -entry_points = setuptools.dist:check_entry_points -exclude_package_data = setuptools.dist:check_package_data -extras_require = setuptools.dist:check_extras -include_package_data = setuptools.dist:assert_bool -install_requires = setuptools.dist:check_requirements -namespace_packages = setuptools.dist:check_nsp -package_data = setuptools.dist:check_package_data -packages = setuptools.dist:check_packages -python_requires = setuptools.dist:check_specifier -setup_requires = setuptools.dist:check_requirements -test_loader = setuptools.dist:check_importable -test_runner = setuptools.dist:check_importable -test_suite = setuptools.dist:check_test_suite -tests_require = setuptools.dist:check_requirements -use_2to3 = setuptools.dist:assert_bool -use_2to3_exclude_fixers = setuptools.dist:assert_string_list -use_2to3_fixers = setuptools.dist:assert_string_list -zip_safe = setuptools.dist:assert_bool - -[egg_info.writers] -PKG-INFO = setuptools.command.egg_info:write_pkg_info -dependency_links.txt = setuptools.command.egg_info:overwrite_arg -depends.txt = setuptools.command.egg_info:warn_depends_obsolete -eager_resources.txt = setuptools.command.egg_info:overwrite_arg -entry_points.txt = setuptools.command.egg_info:write_entries -namespace_packages.txt = setuptools.command.egg_info:overwrite_arg -requires.txt = setuptools.command.egg_info:write_requirements -top_level.txt = setuptools.command.egg_info:write_toplevel_names - -[setuptools.installation] -eggsecutable = setuptools.command.easy_install:bootstrap - diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/metadata.json b/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/metadata.json deleted file mode 100644 index 494b205..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/metadata.json +++ /dev/null @@ -1 +0,0 @@ -{"classifiers": ["Development Status :: 5 - Production/Stable", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Topic :: Software Development :: Libraries :: Python Modules", "Topic :: System :: Archiving :: Packaging", "Topic :: System :: Systems Administration", "Topic :: Utilities"], "description_content_type": "text/x-rst; charset=UTF-8", "extensions": {"python.commands": {"wrap_console": {"easy_install": "setuptools.command.easy_install:main"}}, "python.details": {"contacts": [{"email": "distutils-sig@python.org", "name": "Python Packaging Authority", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst"}, "project_urls": {"Home": "https://github.com/pypa/setuptools"}}, 
"python.exports": {"console_scripts": {"easy_install": "setuptools.command.easy_install:main"}, "distutils.commands": {"alias": "setuptools.command.alias:alias", "bdist_egg": "setuptools.command.bdist_egg:bdist_egg", "bdist_rpm": "setuptools.command.bdist_rpm:bdist_rpm", "bdist_wininst": "setuptools.command.bdist_wininst:bdist_wininst", "build_clib": "setuptools.command.build_clib:build_clib", "build_ext": "setuptools.command.build_ext:build_ext", "build_py": "setuptools.command.build_py:build_py", "develop": "setuptools.command.develop:develop", "dist_info": "setuptools.command.dist_info:dist_info", "easy_install": "setuptools.command.easy_install:easy_install", "egg_info": "setuptools.command.egg_info:egg_info", "install": "setuptools.command.install:install", "install_egg_info": "setuptools.command.install_egg_info:install_egg_info", "install_lib": "setuptools.command.install_lib:install_lib", "install_scripts": "setuptools.command.install_scripts:install_scripts", "register": "setuptools.command.register:register", "rotate": "setuptools.command.rotate:rotate", "saveopts": "setuptools.command.saveopts:saveopts", "sdist": "setuptools.command.sdist:sdist", "setopt": "setuptools.command.setopt:setopt", "test": "setuptools.command.test:test", "upload": "setuptools.command.upload:upload", "upload_docs": "setuptools.command.upload_docs:upload_docs"}, "distutils.setup_keywords": {"convert_2to3_doctests": "setuptools.dist:assert_string_list", "dependency_links": "setuptools.dist:assert_string_list", "eager_resources": "setuptools.dist:assert_string_list", "entry_points": "setuptools.dist:check_entry_points", "exclude_package_data": "setuptools.dist:check_package_data", "extras_require": "setuptools.dist:check_extras", "include_package_data": "setuptools.dist:assert_bool", "install_requires": "setuptools.dist:check_requirements", "namespace_packages": "setuptools.dist:check_nsp", "package_data": "setuptools.dist:check_package_data", "packages": "setuptools.dist:check_packages", "python_requires": "setuptools.dist:check_specifier", "setup_requires": "setuptools.dist:check_requirements", "test_loader": "setuptools.dist:check_importable", "test_runner": "setuptools.dist:check_importable", "test_suite": "setuptools.dist:check_test_suite", "tests_require": "setuptools.dist:check_requirements", "use_2to3": "setuptools.dist:assert_bool", "use_2to3_exclude_fixers": "setuptools.dist:assert_string_list", "use_2to3_fixers": "setuptools.dist:assert_string_list", "zip_safe": "setuptools.dist:assert_bool"}, "egg_info.writers": {"PKG-INFO": "setuptools.command.egg_info:write_pkg_info", "dependency_links.txt": "setuptools.command.egg_info:overwrite_arg", "depends.txt": "setuptools.command.egg_info:warn_depends_obsolete", "eager_resources.txt": "setuptools.command.egg_info:overwrite_arg", "entry_points.txt": "setuptools.command.egg_info:write_entries", "namespace_packages.txt": "setuptools.command.egg_info:overwrite_arg", "requires.txt": "setuptools.command.egg_info:write_requirements", "top_level.txt": "setuptools.command.egg_info:write_toplevel_names"}, "setuptools.installation": {"eggsecutable": "setuptools.command.easy_install:bootstrap"}}}, "extras": ["certs", "ssl"], "generator": "bdist_wheel (0.30.0)", "keywords": ["CPAN", "PyPI", "distutils", "eggs", "package", "management"], "metadata_version": "2.0", "name": "setuptools", "project_url": "Documentation, https://setuptools.readthedocs.io/", "requires_python": ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*", "summary": "Easily download, build, install, upgrade, and 
uninstall Python packages", "version": "39.0.1"} \ No newline at end of file diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/top_level.txt b/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/top_level.txt deleted file mode 100644 index 4577c6a..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/top_level.txt +++ /dev/null @@ -1,3 +0,0 @@ -easy_install -pkg_resources -setuptools diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/zip-safe b/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/zip-safe deleted file mode 100644 index 8b13789..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools-39.0.1.dist-info/zip-safe +++ /dev/null @@ -1 +0,0 @@ - diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__init__.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/__init__.py deleted file mode 100644 index 7da47fb..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/__init__.py +++ /dev/null @@ -1,180 +0,0 @@ -"""Extensions to the 'distutils' for large or complex distributions""" - -import os -import functools -import distutils.core -import distutils.filelist -from distutils.util import convert_path -from fnmatch import fnmatchcase - -from setuptools.extern.six.moves import filter, map - -import setuptools.version -from setuptools.extension import Extension -from setuptools.dist import Distribution, Feature -from setuptools.depends import Require -from . import monkey - -__all__ = [ - 'setup', 'Distribution', 'Feature', 'Command', 'Extension', 'Require', - 'find_packages', -] - -__version__ = setuptools.version.__version__ - -bootstrap_install_from = None - -# If we run 2to3 on .py files, should we also convert docstrings? -# Default: yes; assume that we can detect doctests reliably -run_2to3_on_doctests = True -# Standard package names for fixer packages -lib2to3_fixer_packages = ['lib2to3.fixes'] - - -class PackageFinder(object): - """ - Generate a list of all Python packages found within a directory - """ - - @classmethod - def find(cls, where='.', exclude=(), include=('*',)): - """Return a list all Python packages found within directory 'where' - - 'where' is the root directory which will be searched for packages. It - should be supplied as a "cross-platform" (i.e. URL-style) path; it will - be converted to the appropriate local path syntax. - - 'exclude' is a sequence of package names to exclude; '*' can be used - as a wildcard in the names, such that 'foo.*' will exclude all - subpackages of 'foo' (but not 'foo' itself). - - 'include' is a sequence of package names to include. If it's - specified, only the named packages will be included. If it's not - specified, all found packages will be included. 'include' can contain - shell style wildcard patterns just like 'exclude'. - """ - - return list(cls._find_packages_iter( - convert_path(where), - cls._build_filter('ez_setup', '*__pycache__', *exclude), - cls._build_filter(*include))) - - @classmethod - def _find_packages_iter(cls, where, exclude, include): - """ - All the packages found in 'where' that pass the 'include' filter, but - not the 'exclude' filter. - """ - for root, dirs, files in os.walk(where, followlinks=True): - # Copy dirs to iterate over it, then empty dirs. 
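For context, the PackageFinder class being deleted here is the implementation behind setuptools.find_packages. A minimal usage sketch follows; the src/mypkg layout is hypothetical and not taken from this repository:

from setuptools import find_packages

# Hypothetical layout:
#   src/mypkg/__init__.py
#   src/mypkg/utils/__init__.py
#   src/tests/__init__.py
packages = find_packages(where='src', exclude=('tests', 'tests.*'))
# expected: ['mypkg', 'mypkg.utils'] -- 'tests' and its subpackages are
# filtered out by fnmatchcase patterns, exactly as _build_filter above does.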
- all_dirs = dirs[:] - dirs[:] = [] - - for dir in all_dirs: - full_path = os.path.join(root, dir) - rel_path = os.path.relpath(full_path, where) - package = rel_path.replace(os.path.sep, '.') - - # Skip directory trees that are not valid packages - if ('.' in dir or not cls._looks_like_package(full_path)): - continue - - # Should this package be included? - if include(package) and not exclude(package): - yield package - - # Keep searching subdirectories, as there may be more packages - # down there, even if the parent was excluded. - dirs.append(dir) - - @staticmethod - def _looks_like_package(path): - """Does a directory look like a package?""" - return os.path.isfile(os.path.join(path, '__init__.py')) - - @staticmethod - def _build_filter(*patterns): - """ - Given a list of patterns, return a callable that will be true only if - the input matches at least one of the patterns. - """ - return lambda name: any(fnmatchcase(name, pat=pat) for pat in patterns) - - -class PEP420PackageFinder(PackageFinder): - @staticmethod - def _looks_like_package(path): - return True - - -find_packages = PackageFinder.find - - -def _install_setup_requires(attrs): - # Note: do not use `setuptools.Distribution` directly, as - # our PEP 517 backend patches `distutils.core.Distribution`. - dist = distutils.core.Distribution(dict( - (k, v) for k, v in attrs.items() - if k in ('dependency_links', 'setup_requires') - )) - # Honor setup.cfg's options. - dist.parse_config_files(ignore_option_errors=True) - if dist.setup_requires: - dist.fetch_build_eggs(dist.setup_requires) - - -def setup(**attrs): - # Make sure we have any requirements needed to interpret 'attrs'. - _install_setup_requires(attrs) - return distutils.core.setup(**attrs) - -setup.__doc__ = distutils.core.setup.__doc__ - - -_Command = monkey.get_unpatched(distutils.core.Command) - - -class Command(_Command): - __doc__ = _Command.__doc__ - - command_consumes_arguments = False - - def __init__(self, dist, **kw): - """ - Construct the command for dist, updating - vars(self) with any keyword parameters. - """ - _Command.__init__(self, dist) - vars(self).update(kw) - - def reinitialize_command(self, command, reinit_subcommands=0, **kw): - cmd = _Command.reinitialize_command(self, command, reinit_subcommands) - vars(cmd).update(kw) - return cmd - - -def _find_all_simple(path): - """ - Find all files under 'path' - """ - results = ( - os.path.join(base, file) - for base, dirs, files in os.walk(path, followlinks=True) - for file in files - ) - return filter(os.path.isfile, results) - - -def findall(dir=os.curdir): - """ - Find all files under 'dir' and return the list of full filenames. - Unless dir is '.', return full filenames with dir prepended. 
- """ - files = _find_all_simple(dir) - if dir == os.curdir: - make_rel = functools.partial(os.path.relpath, start=dir) - files = map(make_rel, files) - return list(files) - - -monkey.patch_all() diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index e21ef34..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/archive_util.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/archive_util.cpython-36.pyc deleted file mode 100644 index 46d7f97..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/archive_util.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/build_meta.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/build_meta.cpython-36.pyc deleted file mode 100644 index f625ed1..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/build_meta.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/config.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/config.cpython-36.pyc deleted file mode 100644 index ae0db09..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/config.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/dep_util.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/dep_util.cpython-36.pyc deleted file mode 100644 index d7e2614..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/dep_util.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/depends.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/depends.cpython-36.pyc deleted file mode 100644 index a780f87..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/depends.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/dist.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/dist.cpython-36.pyc deleted file mode 100644 index 012d0eb..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/dist.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/extension.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/extension.cpython-36.pyc deleted file mode 100644 index e215784..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/extension.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/glibc.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/glibc.cpython-36.pyc deleted file mode 100644 index 16e9a93..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/glibc.cpython-36.pyc and /dev/null 
differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/glob.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/glob.cpython-36.pyc deleted file mode 100644 index c79b0d5..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/glob.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/launch.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/launch.cpython-36.pyc deleted file mode 100644 index ce27bf4..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/launch.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/lib2to3_ex.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/lib2to3_ex.cpython-36.pyc deleted file mode 100644 index 26611ef..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/lib2to3_ex.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/monkey.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/monkey.cpython-36.pyc deleted file mode 100644 index f766943..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/monkey.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/msvc.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/msvc.cpython-36.pyc deleted file mode 100644 index 3d46267..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/msvc.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/namespaces.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/namespaces.cpython-36.pyc deleted file mode 100644 index 761ae39..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/namespaces.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/package_index.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/package_index.cpython-36.pyc deleted file mode 100644 index fcaf10c..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/package_index.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/pep425tags.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/pep425tags.cpython-36.pyc deleted file mode 100644 index e82faff..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/pep425tags.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/py27compat.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/py27compat.cpython-36.pyc deleted file mode 100644 index 09b7b26..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/py27compat.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/py31compat.cpython-36.pyc 
b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/py31compat.cpython-36.pyc deleted file mode 100644 index 9fffddc..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/py31compat.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/py33compat.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/py33compat.cpython-36.pyc deleted file mode 100644 index 6a8e43b..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/py33compat.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/py36compat.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/py36compat.cpython-36.pyc deleted file mode 100644 index a0b3851..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/py36compat.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/sandbox.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/sandbox.cpython-36.pyc deleted file mode 100644 index a2bb11a..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/sandbox.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/site-patch.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/site-patch.cpython-36.pyc deleted file mode 100644 index cb35b1b..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/site-patch.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/ssl_support.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/ssl_support.cpython-36.pyc deleted file mode 100644 index 40f4d03..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/ssl_support.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/unicode_utils.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/unicode_utils.cpython-36.pyc deleted file mode 100644 index 870d3c6..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/unicode_utils.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/version.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/version.cpython-36.pyc deleted file mode 100644 index 561206d..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/version.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/wheel.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/wheel.cpython-36.pyc deleted file mode 100644 index 6c3d675..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/wheel.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/windows_support.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/windows_support.cpython-36.pyc deleted file mode 100644 index 
4d82cf4..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/__pycache__/windows_support.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/__init__.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index 6b85846..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/__pycache__/pyparsing.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/__pycache__/pyparsing.cpython-36.pyc deleted file mode 100644 index 0531cde..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/__pycache__/pyparsing.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/__pycache__/six.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/__pycache__/six.cpython-36.pyc deleted file mode 100644 index 20df7ae..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/__pycache__/six.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__about__.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__about__.py deleted file mode 100644 index 95d330e..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__about__.py +++ /dev/null @@ -1,21 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -__all__ = [ - "__title__", "__summary__", "__uri__", "__version__", "__author__", - "__email__", "__license__", "__copyright__", -] - -__title__ = "packaging" -__summary__ = "Core utilities for Python packages" -__uri__ = "https://github.com/pypa/packaging" - -__version__ = "16.8" - -__author__ = "Donald Stufft and individual contributors" -__email__ = "donald@stufft.io" - -__license__ = "BSD or Apache License, Version 2.0" -__copyright__ = "Copyright 2014-2016 %s" % __author__ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__init__.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__init__.py deleted file mode 100644 index 5ee6220..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. 
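The packaging/__about__.py deleted just above keeps all distribution metadata in a single module, and the packaging/__init__.py whose deletion begins here simply re-exports it. A minimal sketch of that single-source-metadata pattern; the module name mypkg and its values are hypothetical:

# mypkg/__about__.py -- the only place metadata is defined
__title__ = "mypkg"
__version__ = "0.1.0"

# mypkg/__init__.py -- re-export so callers can read mypkg.__version__
from .__about__ import __title__, __version__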
-from __future__ import absolute_import, division, print_function - -from .__about__ import ( - __author__, __copyright__, __email__, __license__, __summary__, __title__, - __uri__, __version__ -) - -__all__ = [ - "__title__", "__summary__", "__uri__", "__version__", "__author__", - "__email__", "__license__", "__copyright__", -] diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/__about__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/__about__.cpython-36.pyc deleted file mode 100644 index 2581935..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/__about__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index 50cebda..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/_compat.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/_compat.cpython-36.pyc deleted file mode 100644 index 4d73e5f..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/_compat.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/_structures.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/_structures.cpython-36.pyc deleted file mode 100644 index 02bf997..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/_structures.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/markers.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/markers.cpython-36.pyc deleted file mode 100644 index 91751c7..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/markers.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/requirements.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/requirements.cpython-36.pyc deleted file mode 100644 index a7132b3..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/requirements.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/specifiers.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/specifiers.cpython-36.pyc deleted file mode 100644 index b7ed30b..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/specifiers.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/utils.cpython-36.pyc 
b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/utils.cpython-36.pyc deleted file mode 100644 index 8e0fbcc..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/utils.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/version.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/version.cpython-36.pyc deleted file mode 100644 index c6f0f50..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/__pycache__/version.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/_compat.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/_compat.py deleted file mode 100644 index 210bb80..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/_compat.py +++ /dev/null @@ -1,30 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import sys - - -PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 - -# flake8: noqa - -if PY3: - string_types = str, -else: - string_types = basestring, - - -def with_metaclass(meta, *bases): - """ - Create a base class with a metaclass. - """ - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(meta): - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/_structures.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/_structures.py deleted file mode 100644 index ccc2786..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/_structures.py +++ /dev/null @@ -1,68 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. 
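The _structures.py module whose deletion begins here defines Infinity and NegativeInfinity sentinels that compare greater or less than every other value; packaging uses them as padding when building version comparison keys. A minimal sketch of the same idea using functools.total_ordering (the vendored code spells out all six comparison methods instead):

import functools

@functools.total_ordering
class _InfinityType(object):
    """Sentinel that compares greater than any other value."""
    def __repr__(self):
        return "Infinity"
    def __eq__(self, other):
        return isinstance(other, self.__class__)
    def __gt__(self, other):
        return True

Infinity = _InfinityType()

print(Infinity > 10 ** 9)            # True
print((1, Infinity) > (1, 10 ** 9))  # True: handy as a tuple-comparison pad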
-from __future__ import absolute_import, division, print_function - - -class Infinity(object): - - def __repr__(self): - return "Infinity" - - def __hash__(self): - return hash(repr(self)) - - def __lt__(self, other): - return False - - def __le__(self, other): - return False - - def __eq__(self, other): - return isinstance(other, self.__class__) - - def __ne__(self, other): - return not isinstance(other, self.__class__) - - def __gt__(self, other): - return True - - def __ge__(self, other): - return True - - def __neg__(self): - return NegativeInfinity - -Infinity = Infinity() - - -class NegativeInfinity(object): - - def __repr__(self): - return "-Infinity" - - def __hash__(self): - return hash(repr(self)) - - def __lt__(self, other): - return True - - def __le__(self, other): - return True - - def __eq__(self, other): - return isinstance(other, self.__class__) - - def __ne__(self, other): - return not isinstance(other, self.__class__) - - def __gt__(self, other): - return False - - def __ge__(self, other): - return False - - def __neg__(self): - return Infinity - -NegativeInfinity = NegativeInfinity() diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/markers.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/markers.py deleted file mode 100644 index 031332a..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/markers.py +++ /dev/null @@ -1,301 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import operator -import os -import platform -import sys - -from setuptools.extern.pyparsing import ParseException, ParseResults, stringStart, stringEnd -from setuptools.extern.pyparsing import ZeroOrMore, Group, Forward, QuotedString -from setuptools.extern.pyparsing import Literal as L # noqa - -from ._compat import string_types -from .specifiers import Specifier, InvalidSpecifier - - -__all__ = [ - "InvalidMarker", "UndefinedComparison", "UndefinedEnvironmentName", - "Marker", "default_environment", -] - - -class InvalidMarker(ValueError): - """ - An invalid marker was found, users should refer to PEP 508. - """ - - -class UndefinedComparison(ValueError): - """ - An invalid operation was attempted on a value that doesn't support it. - """ - - -class UndefinedEnvironmentName(ValueError): - """ - A name was attempted to be used that does not exist inside of the - environment. 
- """ - - -class Node(object): - - def __init__(self, value): - self.value = value - - def __str__(self): - return str(self.value) - - def __repr__(self): - return "<{0}({1!r})>".format(self.__class__.__name__, str(self)) - - def serialize(self): - raise NotImplementedError - - -class Variable(Node): - - def serialize(self): - return str(self) - - -class Value(Node): - - def serialize(self): - return '"{0}"'.format(self) - - -class Op(Node): - - def serialize(self): - return str(self) - - -VARIABLE = ( - L("implementation_version") | - L("platform_python_implementation") | - L("implementation_name") | - L("python_full_version") | - L("platform_release") | - L("platform_version") | - L("platform_machine") | - L("platform_system") | - L("python_version") | - L("sys_platform") | - L("os_name") | - L("os.name") | # PEP-345 - L("sys.platform") | # PEP-345 - L("platform.version") | # PEP-345 - L("platform.machine") | # PEP-345 - L("platform.python_implementation") | # PEP-345 - L("python_implementation") | # undocumented setuptools legacy - L("extra") -) -ALIASES = { - 'os.name': 'os_name', - 'sys.platform': 'sys_platform', - 'platform.version': 'platform_version', - 'platform.machine': 'platform_machine', - 'platform.python_implementation': 'platform_python_implementation', - 'python_implementation': 'platform_python_implementation' -} -VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) - -VERSION_CMP = ( - L("===") | - L("==") | - L(">=") | - L("<=") | - L("!=") | - L("~=") | - L(">") | - L("<") -) - -MARKER_OP = VERSION_CMP | L("not in") | L("in") -MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) - -MARKER_VALUE = QuotedString("'") | QuotedString('"') -MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) - -BOOLOP = L("and") | L("or") - -MARKER_VAR = VARIABLE | MARKER_VALUE - -MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) -MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) - -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() - -MARKER_EXPR = Forward() -MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) -MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) - -MARKER = stringStart + MARKER_EXPR + stringEnd - - -def _coerce_parse_result(results): - if isinstance(results, ParseResults): - return [_coerce_parse_result(i) for i in results] - else: - return results - - -def _format_marker(marker, first=True): - assert isinstance(marker, (list, tuple, string_types)) - - # Sometimes we have a structure like [[...]] which is a single item list - # where the single item is itself it's own list. In that case we want skip - # the rest of this function so that we don't get extraneous () on the - # outside. 
- if (isinstance(marker, list) and len(marker) == 1 and - isinstance(marker[0], (list, tuple))): - return _format_marker(marker[0]) - - if isinstance(marker, list): - inner = (_format_marker(m, first=False) for m in marker) - if first: - return " ".join(inner) - else: - return "(" + " ".join(inner) + ")" - elif isinstance(marker, tuple): - return " ".join([m.serialize() for m in marker]) - else: - return marker - - -_operators = { - "in": lambda lhs, rhs: lhs in rhs, - "not in": lambda lhs, rhs: lhs not in rhs, - "<": operator.lt, - "<=": operator.le, - "==": operator.eq, - "!=": operator.ne, - ">=": operator.ge, - ">": operator.gt, -} - - -def _eval_op(lhs, op, rhs): - try: - spec = Specifier("".join([op.serialize(), rhs])) - except InvalidSpecifier: - pass - else: - return spec.contains(lhs) - - oper = _operators.get(op.serialize()) - if oper is None: - raise UndefinedComparison( - "Undefined {0!r} on {1!r} and {2!r}.".format(op, lhs, rhs) - ) - - return oper(lhs, rhs) - - -_undefined = object() - - -def _get_env(environment, name): - value = environment.get(name, _undefined) - - if value is _undefined: - raise UndefinedEnvironmentName( - "{0!r} does not exist in evaluation environment.".format(name) - ) - - return value - - -def _evaluate_markers(markers, environment): - groups = [[]] - - for marker in markers: - assert isinstance(marker, (list, tuple, string_types)) - - if isinstance(marker, list): - groups[-1].append(_evaluate_markers(marker, environment)) - elif isinstance(marker, tuple): - lhs, op, rhs = marker - - if isinstance(lhs, Variable): - lhs_value = _get_env(environment, lhs.value) - rhs_value = rhs.value - else: - lhs_value = lhs.value - rhs_value = _get_env(environment, rhs.value) - - groups[-1].append(_eval_op(lhs_value, op, rhs_value)) - else: - assert marker in ["and", "or"] - if marker == "or": - groups.append([]) - - return any(all(item) for item in groups) - - -def format_full_version(info): - version = '{0.major}.{0.minor}.{0.micro}'.format(info) - kind = info.releaselevel - if kind != 'final': - version += kind[0] + str(info.serial) - return version - - -def default_environment(): - if hasattr(sys, 'implementation'): - iver = format_full_version(sys.implementation.version) - implementation_name = sys.implementation.name - else: - iver = '0' - implementation_name = '' - - return { - "implementation_name": implementation_name, - "implementation_version": iver, - "os_name": os.name, - "platform_machine": platform.machine(), - "platform_release": platform.release(), - "platform_system": platform.system(), - "platform_version": platform.version(), - "python_full_version": platform.python_version(), - "platform_python_implementation": platform.python_implementation(), - "python_version": platform.python_version()[:3], - "sys_platform": sys.platform, - } - - -class Marker(object): - - def __init__(self, marker): - try: - self._markers = _coerce_parse_result(MARKER.parseString(marker)) - except ParseException as e: - err_str = "Invalid marker: {0!r}, parse error at {1!r}".format( - marker, marker[e.loc:e.loc + 8]) - raise InvalidMarker(err_str) - - def __str__(self): - return _format_marker(self._markers) - - def __repr__(self): - return "<Marker({0!r})>".format(str(self)) - - def evaluate(self, environment=None): - """Evaluate a marker. - - Return the boolean from evaluating the given marker against the - environment. environment is an optional argument to override all or - part of the determined environment. 
- - The environment is determined from the current Python process. - """ - current_environment = default_environment() - if environment is not None: - current_environment.update(environment) - - return _evaluate_markers(self._markers, current_environment) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/requirements.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/requirements.py deleted file mode 100644 index 5b49341..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/requirements.py +++ /dev/null @@ -1,127 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import string -import re - -from setuptools.extern.pyparsing import stringStart, stringEnd, originalTextFor, ParseException -from setuptools.extern.pyparsing import ZeroOrMore, Word, Optional, Regex, Combine -from setuptools.extern.pyparsing import Literal as L # noqa -from setuptools.extern.six.moves.urllib import parse as urlparse - -from .markers import MARKER_EXPR, Marker -from .specifiers import LegacySpecifier, Specifier, SpecifierSet - - -class InvalidRequirement(ValueError): - """ - An invalid requirement was found, users should refer to PEP 508. - """ - - -ALPHANUM = Word(string.ascii_letters + string.digits) - -LBRACKET = L("[").suppress() -RBRACKET = L("]").suppress() -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() -COMMA = L(",").suppress() -SEMICOLON = L(";").suppress() -AT = L("@").suppress() - -PUNCTUATION = Word("-_.") -IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) -IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) - -NAME = IDENTIFIER("name") -EXTRA = IDENTIFIER - -URI = Regex(r'[^ ]+')("url") -URL = (AT + URI) - -EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) -EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") - -VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) -VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) - -VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY -VERSION_MANY = Combine(VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), - joinString=",", adjacent=False)("_raw_spec") -_VERSION_SPEC = Optional(((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY)) -_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or '') - -VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") -VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) - -MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") -MARKER_EXPR.setParseAction( - lambda s, l, t: Marker(s[t._original_start:t._original_end]) -) -MARKER_SEPERATOR = SEMICOLON -MARKER = MARKER_SEPERATOR + MARKER_EXPR - -VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) -URL_AND_MARKER = URL + Optional(MARKER) - -NAMED_REQUIREMENT = \ - NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) - -REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd - - -class Requirement(object): - """Parse a requirement. - - Parse a given requirement string into its parts, such as name, specifier, - URL, and extras. Raises InvalidRequirement on a badly-formed requirement - string. - """ - - # TODO: Can we test whether something is contained within a requirement? - # If so how do we do that? Do we need to test against the _name_ of - # the thing as well as the version? 
What about the markers? - # TODO: Can we normalize the name and extra name? - - def __init__(self, requirement_string): - try: - req = REQUIREMENT.parseString(requirement_string) - except ParseException as e: - raise InvalidRequirement( - "Invalid requirement, parse error at \"{0!r}\"".format( - requirement_string[e.loc:e.loc + 8])) - - self.name = req.name - if req.url: - parsed_url = urlparse.urlparse(req.url) - if not (parsed_url.scheme and parsed_url.netloc) or ( - not parsed_url.scheme and not parsed_url.netloc): - raise InvalidRequirement("Invalid URL given") - self.url = req.url - else: - self.url = None - self.extras = set(req.extras.asList() if req.extras else []) - self.specifier = SpecifierSet(req.specifier) - self.marker = req.marker if req.marker else None - - def __str__(self): - parts = [self.name] - - if self.extras: - parts.append("[{0}]".format(",".join(sorted(self.extras)))) - - if self.specifier: - parts.append(str(self.specifier)) - - if self.url: - parts.append("@ {0}".format(self.url)) - - if self.marker: - parts.append("; {0}".format(self.marker)) - - return "".join(parts) - - def __repr__(self): - return "<Requirement({0!r})>".format(str(self)) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/specifiers.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/specifiers.py deleted file mode 100644 index 7f5a76c..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/specifiers.py +++ /dev/null @@ -1,774 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import abc -import functools -import itertools -import re - -from ._compat import string_types, with_metaclass -from .version import Version, LegacyVersion, parse - - -class InvalidSpecifier(ValueError): - """ - An invalid specifier was found, users should refer to PEP 440. - """ - - -class BaseSpecifier(with_metaclass(abc.ABCMeta, object)): - - @abc.abstractmethod - def __str__(self): - """ - Returns the str representation of this Specifier like object. This - should be representative of the Specifier itself. - """ - - @abc.abstractmethod - def __hash__(self): - """ - Returns a hash value for this Specifier like object. - """ - - @abc.abstractmethod - def __eq__(self, other): - """ - Returns a boolean representing whether or not the two Specifier like - objects are equal. - """ - - @abc.abstractmethod - def __ne__(self, other): - """ - Returns a boolean representing whether or not the two Specifier like - objects are not equal. - """ - - @abc.abstractproperty - def prereleases(self): - """ - Returns whether or not pre-releases as a whole are allowed by this - specifier. - """ - - @prereleases.setter - def prereleases(self, value): - """ - Sets whether or not pre-releases as a whole are allowed by this - specifier. - """ - - @abc.abstractmethod - def contains(self, item, prereleases=None): - """ - Determines if the given item is contained within this specifier. - """ - - @abc.abstractmethod - def filter(self, iterable, prereleases=None): - """ - Takes an iterable of items and filters them so that only items which - are contained within this specifier are allowed in it. 
- """ - - -class _IndividualSpecifier(BaseSpecifier): - - _operators = {} - - def __init__(self, spec="", prereleases=None): - match = self._regex.search(spec) - if not match: - raise InvalidSpecifier("Invalid specifier: '{0}'".format(spec)) - - self._spec = ( - match.group("operator").strip(), - match.group("version").strip(), - ) - - # Store whether or not this Specifier should accept prereleases - self._prereleases = prereleases - - def __repr__(self): - pre = ( - ", prereleases={0!r}".format(self.prereleases) - if self._prereleases is not None - else "" - ) - - return "<{0}({1!r}{2})>".format( - self.__class__.__name__, - str(self), - pre, - ) - - def __str__(self): - return "{0}{1}".format(*self._spec) - - def __hash__(self): - return hash(self._spec) - - def __eq__(self, other): - if isinstance(other, string_types): - try: - other = self.__class__(other) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._spec == other._spec - - def __ne__(self, other): - if isinstance(other, string_types): - try: - other = self.__class__(other) - except InvalidSpecifier: - return NotImplemented - elif not isinstance(other, self.__class__): - return NotImplemented - - return self._spec != other._spec - - def _get_operator(self, op): - return getattr(self, "_compare_{0}".format(self._operators[op])) - - def _coerce_version(self, version): - if not isinstance(version, (LegacyVersion, Version)): - version = parse(version) - return version - - @property - def operator(self): - return self._spec[0] - - @property - def version(self): - return self._spec[1] - - @property - def prereleases(self): - return self._prereleases - - @prereleases.setter - def prereleases(self, value): - self._prereleases = value - - def __contains__(self, item): - return self.contains(item) - - def contains(self, item, prereleases=None): - # Determine if prereleases are to be allowed or not. - if prereleases is None: - prereleases = self.prereleases - - # Normalize item to a Version or LegacyVersion, this allows us to have - # a shortcut for ``"2.0" in Specifier(">=2") - item = self._coerce_version(item) - - # Determine if we should be supporting prereleases in this specifier - # or not, if we do not support prereleases than we can short circuit - # logic if this version is a prereleases. - if item.is_prerelease and not prereleases: - return False - - # Actually do the comparison to determine if this item is contained - # within this Specifier or not. - return self._get_operator(self.operator)(item, self.version) - - def filter(self, iterable, prereleases=None): - yielded = False - found_prereleases = [] - - kw = {"prereleases": prereleases if prereleases is not None else True} - - # Attempt to iterate over all the values in the iterable and if any of - # them match, yield them. - for version in iterable: - parsed_version = self._coerce_version(version) - - if self.contains(parsed_version, **kw): - # If our version is a prerelease, and we were not set to allow - # prereleases, then we'll store it for later incase nothing - # else matches this specifier. - if (parsed_version.is_prerelease and not - (prereleases or self.prereleases)): - found_prereleases.append(version) - # Either this is not a prerelease, or we should have been - # accepting prereleases from the begining. 
- else: - yielded = True - yield version - - # Now that we've iterated over everything, determine if we've yielded - # any values, and if we have not and we have any prereleases stored up - # then we will go ahead and yield the prereleases. - if not yielded and found_prereleases: - for version in found_prereleases: - yield version - - -class LegacySpecifier(_IndividualSpecifier): - - _regex_str = ( - r""" - (?P<operator>(==|!=|<=|>=|<|>)) - \s* - (?P<version> - [^,;\s)]* # Since this is a "legacy" specifier, and the version - # string can be just about anything, we match everything - # except for whitespace, a semi-colon for marker support, - # a closing paren since versions can be enclosed in - # them, and a comma since it's a version separator. - ) - """ - ) - - _regex = re.compile( - r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) - - _operators = { - "==": "equal", - "!=": "not_equal", - "<=": "less_than_equal", - ">=": "greater_than_equal", - "<": "less_than", - ">": "greater_than", - } - - def _coerce_version(self, version): - if not isinstance(version, LegacyVersion): - version = LegacyVersion(str(version)) - return version - - def _compare_equal(self, prospective, spec): - return prospective == self._coerce_version(spec) - - def _compare_not_equal(self, prospective, spec): - return prospective != self._coerce_version(spec) - - def _compare_less_than_equal(self, prospective, spec): - return prospective <= self._coerce_version(spec) - - def _compare_greater_than_equal(self, prospective, spec): - return prospective >= self._coerce_version(spec) - - def _compare_less_than(self, prospective, spec): - return prospective < self._coerce_version(spec) - - def _compare_greater_than(self, prospective, spec): - return prospective > self._coerce_version(spec) - - -def _require_version_compare(fn): - @functools.wraps(fn) - def wrapped(self, prospective, spec): - if not isinstance(prospective, Version): - return False - return fn(self, prospective, spec) - return wrapped - - -class Specifier(_IndividualSpecifier): - - _regex_str = ( - r""" - (?P<operator>(~=|==|!=|<=|>=|<|>|===)) - (?P<version> - (?: - # The identity operators allow for an escape hatch that will - # do an exact string match of the version you wish to install. - # This will not be parsed by PEP 440 and we cannot determine - # any semantic meaning from it. This operator is discouraged - # but included entirely as an escape hatch. - (?<====) # Only match for the identity operator - \s* - [^\s]* # We just match everything, except for whitespace - # since we are only testing for strict identity. - ) - | - (?: - # The (non)equality operators allow for wild card and local - # versions to be specified so we have to define these two - # operators separately to enable that. - (?<===|!=) # Only match for equals and not equals - - \s* - v? - (?:[0-9]+!)? # epoch - [0-9]+(?:\.[0-9]+)* # release - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - - # You cannot use a wild card and a dev or local version - # together so group them with a | and make them optional. - (?: - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local - | - \.\* # Wild card syntax of .* - )? - ) - | - (?: - # The compatible operator requires at least two digits in the - # release segment. - (?<=~=) # Only match for the compatible operator - - \s* - v? - (?:[0-9]+!)? 
# epoch - [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - ) - | - (?: - # All other operators only allow a sub set of what the - # (non)equality operators do. Specifically they do not allow - # local versions to be specified nor do they allow the prefix - # matching wild cards. - (?<!==|!=|~=) # We have special cases for these - # operators so we want to make sure they - # don't match here. - - \s* - v? - (?:[0-9]+!)? # epoch - [0-9]+(?:\.[0-9]+)* # release - (?: # pre release - [-_\.]? - (a|b|c|rc|alpha|beta|pre|preview) - [-_\.]? - [0-9]* - )? - (?: # post release - (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) - )? - (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release - ) - ) - """ - ) - - _regex = re.compile( - r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) - - _operators = { - "~=": "compatible", - "==": "equal", - "!=": "not_equal", - "<=": "less_than_equal", - ">=": "greater_than_equal", - "<": "less_than", - ">": "greater_than", - "===": "arbitrary", - } - - @_require_version_compare - def _compare_compatible(self, prospective, spec): - # Compatible releases have an equivalent combination of >= and ==. That - # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to - # implement this in terms of the other specifiers instead of - # implementing it ourselves. The only thing we need to do is construct - # the other specifiers. - - # We want everything but the last item in the version, but we want to - # ignore post and dev releases and we want to treat the pre-release as - # it's own separate segment. - prefix = ".".join( - list( - itertools.takewhile( - lambda x: (not x.startswith("post") and not - x.startswith("dev")), - _version_split(spec), - ) - )[:-1] - ) - - # Add the prefix notation to the end of our string - prefix += ".*" - - return (self._get_operator(">=")(prospective, spec) and - self._get_operator("==")(prospective, prefix)) - - @_require_version_compare - def _compare_equal(self, prospective, spec): - # We need special logic to handle prefix matching - if spec.endswith(".*"): - # In the case of prefix matching we want to ignore local segment. - prospective = Version(prospective.public) - # Split the spec out by dots, and pretend that there is an implicit - # dot in between a release segment and a pre-release segment. - spec = _version_split(spec[:-2]) # Remove the trailing .* - - # Split the prospective version out by dots, and pretend that there - # is an implicit dot in between a release segment and a pre-release - # segment. - prospective = _version_split(str(prospective)) - - # Shorten the prospective version to be the same length as the spec - # so that we can determine if the specifier is a prefix of the - # prospective version or not. - prospective = prospective[:len(spec)] - - # Pad out our two sides with zeros so that they both equal the same - # length. - spec, prospective = _pad_version(spec, prospective) - else: - # Convert our spec string into a Version - spec = Version(spec) - - # If the specifier does not have a local segment, then we want to - # act as if the prospective version also does not have a local - # segment. 
- if not spec.local: - prospective = Version(prospective.public) - - return prospective == spec - - @_require_version_compare - def _compare_not_equal(self, prospective, spec): - return not self._compare_equal(prospective, spec) - - @_require_version_compare - def _compare_less_than_equal(self, prospective, spec): - return prospective <= Version(spec) - - @_require_version_compare - def _compare_greater_than_equal(self, prospective, spec): - return prospective >= Version(spec) - - @_require_version_compare - def _compare_less_than(self, prospective, spec): - # Convert our spec to a Version instance, since we'll want to work with - # it as a version. - spec = Version(spec) - - # Check to see if the prospective version is less than the spec - # version. If it's not we can short circuit and just return False now - # instead of doing extra unneeded work. - if not prospective < spec: - return False - - # This special case is here so that, unless the specifier itself - # includes is a pre-release version, that we do not accept pre-release - # versions for the version mentioned in the specifier (e.g. <3.1 should - # not match 3.1.dev0, but should match 3.0.dev0). - if not spec.is_prerelease and prospective.is_prerelease: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # If we've gotten to here, it means that prospective version is both - # less than the spec version *and* it's not a pre-release of the same - # version in the spec. - return True - - @_require_version_compare - def _compare_greater_than(self, prospective, spec): - # Convert our spec to a Version instance, since we'll want to work with - # it as a version. - spec = Version(spec) - - # Check to see if the prospective version is greater than the spec - # version. If it's not we can short circuit and just return False now - # instead of doing extra unneeded work. - if not prospective > spec: - return False - - # This special case is here so that, unless the specifier itself - # includes is a post-release version, that we do not accept - # post-release versions for the version mentioned in the specifier - # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). - if not spec.is_postrelease and prospective.is_postrelease: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # Ensure that we do not allow a local version of the version mentioned - # in the specifier, which is techincally greater than, to match. - if prospective.local is not None: - if Version(prospective.base_version) == Version(spec.base_version): - return False - - # If we've gotten to here, it means that prospective version is both - # greater than the spec version *and* it's not a pre-release of the - # same version in the spec. - return True - - def _compare_arbitrary(self, prospective, spec): - return str(prospective).lower() == str(spec).lower() - - @property - def prereleases(self): - # If there is an explicit prereleases set for this, then we'll just - # blindly use that. - if self._prereleases is not None: - return self._prereleases - - # Look at all of our specifiers and determine if they are inclusive - # operators, and if they are if they are including an explicit - # prerelease. - operator, version = self._spec - if operator in ["==", ">=", "<=", "~=", "==="]: - # The == specifier can include a trailing .*, if it does we - # want to remove before parsing. 
- if operator == "==" and version.endswith(".*"): - version = version[:-2] - - # Parse the version, and if it is a pre-release than this - # specifier allows pre-releases. - if parse(version).is_prerelease: - return True - - return False - - @prereleases.setter - def prereleases(self, value): - self._prereleases = value - - -_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") - - -def _version_split(version): - result = [] - for item in version.split("."): - match = _prefix_regex.search(item) - if match: - result.extend(match.groups()) - else: - result.append(item) - return result - - -def _pad_version(left, right): - left_split, right_split = [], [] - - # Get the release segment of our versions - left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) - right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) - - # Get the rest of our versions - left_split.append(left[len(left_split[0]):]) - right_split.append(right[len(right_split[0]):]) - - # Insert our padding - left_split.insert( - 1, - ["0"] * max(0, len(right_split[0]) - len(left_split[0])), - ) - right_split.insert( - 1, - ["0"] * max(0, len(left_split[0]) - len(right_split[0])), - ) - - return ( - list(itertools.chain(*left_split)), - list(itertools.chain(*right_split)), - ) - - -class SpecifierSet(BaseSpecifier): - - def __init__(self, specifiers="", prereleases=None): - # Split on , to break each indidivual specifier into it's own item, and - # strip each item to remove leading/trailing whitespace. - specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] - - # Parsed each individual specifier, attempting first to make it a - # Specifier and falling back to a LegacySpecifier. - parsed = set() - for specifier in specifiers: - try: - parsed.add(Specifier(specifier)) - except InvalidSpecifier: - parsed.add(LegacySpecifier(specifier)) - - # Turn our parsed specifiers into a frozen set and save them for later. - self._specs = frozenset(parsed) - - # Store our prereleases value so we can use it later to determine if - # we accept prereleases or not. - self._prereleases = prereleases - - def __repr__(self): - pre = ( - ", prereleases={0!r}".format(self.prereleases) - if self._prereleases is not None - else "" - ) - - return "<SpecifierSet({0!r}{1})>".format(str(self), pre) - - def __str__(self): - return ",".join(sorted(str(s) for s in self._specs)) - - def __hash__(self): - return hash(self._specs) - - def __and__(self, other): - if isinstance(other, string_types): - other = SpecifierSet(other) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - specifier = SpecifierSet() - specifier._specs = frozenset(self._specs | other._specs) - - if self._prereleases is None and other._prereleases is not None: - specifier._prereleases = other._prereleases - elif self._prereleases is not None and other._prereleases is None: - specifier._prereleases = self._prereleases - elif self._prereleases == other._prereleases: - specifier._prereleases = self._prereleases - else: - raise ValueError( - "Cannot combine SpecifierSets with True and False prerelease " - "overrides." 
- ) - - return specifier - - def __eq__(self, other): - if isinstance(other, string_types): - other = SpecifierSet(other) - elif isinstance(other, _IndividualSpecifier): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs == other._specs - - def __ne__(self, other): - if isinstance(other, string_types): - other = SpecifierSet(other) - elif isinstance(other, _IndividualSpecifier): - other = SpecifierSet(str(other)) - elif not isinstance(other, SpecifierSet): - return NotImplemented - - return self._specs != other._specs - - def __len__(self): - return len(self._specs) - - def __iter__(self): - return iter(self._specs) - - @property - def prereleases(self): - # If we have been given an explicit prerelease modifier, then we'll - # pass that through here. - if self._prereleases is not None: - return self._prereleases - - # If we don't have any specifiers, and we don't have a forced value, - # then we'll just return None since we don't know if this should have - # pre-releases or not. - if not self._specs: - return None - - # Otherwise we'll see if any of the given specifiers accept - # prereleases, if any of them do we'll return True, otherwise False. - return any(s.prereleases for s in self._specs) - - @prereleases.setter - def prereleases(self, value): - self._prereleases = value - - def __contains__(self, item): - return self.contains(item) - - def contains(self, item, prereleases=None): - # Ensure that our item is a Version or LegacyVersion instance. - if not isinstance(item, (LegacyVersion, Version)): - item = parse(item) - - # Determine if we're forcing a prerelease or not, if we're not forcing - # one for this particular filter call, then we'll use whatever the - # SpecifierSet thinks for whether or not we should support prereleases. - if prereleases is None: - prereleases = self.prereleases - - # We can determine if we're going to allow pre-releases by looking to - # see if any of the underlying items supports them. If none of them do - # and this item is a pre-release then we do not allow it and we can - # short circuit that here. - # Note: This means that 1.0.dev1 would not be contained in something - # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 - if not prereleases and item.is_prerelease: - return False - - # We simply dispatch to the underlying specs here to make sure that the - # given version is contained within all of them. - # Note: This use of all() here means that an empty set of specifiers - # will always return True, this is an explicit design decision. - return all( - s.contains(item, prereleases=prereleases) - for s in self._specs - ) - - def filter(self, iterable, prereleases=None): - # Determine if we're forcing a prerelease or not, if we're not forcing - # one for this particular filter call, then we'll use whatever the - # SpecifierSet thinks for whether or not we should support prereleases. - if prereleases is None: - prereleases = self.prereleases - - # If we have any specifiers, then we want to wrap our iterable in the - # filter method for each one, this will act as a logical AND amongst - # each specifier. - if self._specs: - for spec in self._specs: - iterable = spec.filter(iterable, prereleases=bool(prereleases)) - return iterable - # If we do not have any specifiers, then we need to have a rough filter - # which will filter out any pre-releases, unless there are no final - # releases, and which will filter out LegacyVersion in general. 
- else: - filtered = [] - found_prereleases = [] - - for item in iterable: - # Ensure that we some kind of Version class for this item. - if not isinstance(item, (LegacyVersion, Version)): - parsed_version = parse(item) - else: - parsed_version = item - - # Filter out any item which is parsed as a LegacyVersion - if isinstance(parsed_version, LegacyVersion): - continue - - # Store any item which is a pre-release for later unless we've - # already found a final version or we are accepting prereleases - if parsed_version.is_prerelease and not prereleases: - if not filtered: - found_prereleases.append(item) - else: - filtered.append(item) - - # If we've found no items except for pre-releases, then we'll go - # ahead and use the pre-releases - if not filtered and found_prereleases and prereleases is None: - return found_prereleases - - return filtered diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/utils.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/utils.py deleted file mode 100644 index 942387c..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/utils.py +++ /dev/null @@ -1,14 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import re - - -_canonicalize_regex = re.compile(r"[-_.]+") - - -def canonicalize_name(name): - # This is taken from PEP 503. - return _canonicalize_regex.sub("-", name).lower() diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/version.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/version.py deleted file mode 100644 index 83b5ee8..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/packaging/version.py +++ /dev/null @@ -1,393 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. -from __future__ import absolute_import, division, print_function - -import collections -import itertools -import re - -from ._structures import Infinity - - -__all__ = [ - "parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN" -] - - -_Version = collections.namedtuple( - "_Version", - ["epoch", "release", "dev", "pre", "post", "local"], -) - - -def parse(version): - """ - Parse the given version string and return either a :class:`Version` object - or a :class:`LegacyVersion` object depending on if the given version is - a valid PEP 440 version or a legacy version. - """ - try: - return Version(version) - except InvalidVersion: - return LegacyVersion(version) - - -class InvalidVersion(ValueError): - """ - An invalid version was found, users should refer to PEP 440. 
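A hedged usage sketch tying together the SpecifierSet, canonicalize_name, and parse helpers whose deletions are recorded above. Imports again assume the standalone `packaging` distribution contemporary with this vendored copy (LegacyVersion was removed in much later packaging releases); expected results are shown as comments::

    from packaging.specifiers import SpecifierSet
    from packaging.utils import canonicalize_name
    from packaging.version import parse, Version, LegacyVersion

    ss = SpecifierSet(">=1.0,!=1.3")
    "1.4" in ss                                     # True, via contains()
    list(ss.filter(["1.0", "1.3", "2.0.dev1"]))     # ['1.0']; prereleases excluded by default

    canonicalize_name("Django_REST.framework")      # 'django-rest-framework' (PEP 503)
    isinstance(parse("1.0.post1"), Version)         # True: a valid PEP 440 version
    isinstance(parse("1.0-cheese"), LegacyVersion)  # True: falls back on InvalidVersion
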
- """ - - -class _BaseVersion(object): - - def __hash__(self): - return hash(self._key) - - def __lt__(self, other): - return self._compare(other, lambda s, o: s < o) - - def __le__(self, other): - return self._compare(other, lambda s, o: s <= o) - - def __eq__(self, other): - return self._compare(other, lambda s, o: s == o) - - def __ge__(self, other): - return self._compare(other, lambda s, o: s >= o) - - def __gt__(self, other): - return self._compare(other, lambda s, o: s > o) - - def __ne__(self, other): - return self._compare(other, lambda s, o: s != o) - - def _compare(self, other, method): - if not isinstance(other, _BaseVersion): - return NotImplemented - - return method(self._key, other._key) - - -class LegacyVersion(_BaseVersion): - - def __init__(self, version): - self._version = str(version) - self._key = _legacy_cmpkey(self._version) - - def __str__(self): - return self._version - - def __repr__(self): - return "<LegacyVersion({0})>".format(repr(str(self))) - - @property - def public(self): - return self._version - - @property - def base_version(self): - return self._version - - @property - def local(self): - return None - - @property - def is_prerelease(self): - return False - - @property - def is_postrelease(self): - return False - - -_legacy_version_component_re = re.compile( - r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE, -) - -_legacy_version_replacement_map = { - "pre": "c", "preview": "c", "-": "final-", "rc": "c", "dev": "@", -} - - -def _parse_version_parts(s): - for part in _legacy_version_component_re.split(s): - part = _legacy_version_replacement_map.get(part, part) - - if not part or part == ".": - continue - - if part[:1] in "0123456789": - # pad for numeric comparison - yield part.zfill(8) - else: - yield "*" + part - - # ensure that alpha/beta/candidate are before final - yield "*final" - - -def _legacy_cmpkey(version): - # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch - # greater than or equal to 0. This will effectively put the LegacyVersion, - # which uses the defacto standard originally implemented by setuptools, - # as before all PEP 440 versions. - epoch = -1 - - # This scheme is taken from pkg_resources.parse_version setuptools prior to - # it's adoption of the packaging library. - parts = [] - for part in _parse_version_parts(version.lower()): - if part.startswith("*"): - # remove "-" before a prerelease tag - if part < "*final": - while parts and parts[-1] == "*final-": - parts.pop() - - # remove trailing zeros from each series of numeric parts - while parts and parts[-1] == "00000000": - parts.pop() - - parts.append(part) - parts = tuple(parts) - - return epoch, parts - -# Deliberately not anchored to the start and end of the string, to make it -# easier for 3rd party code to reuse -VERSION_PATTERN = r""" - v? - (?: - (?:(?P<epoch>[0-9]+)!)? # epoch - (?P<release>[0-9]+(?:\.[0-9]+)*) # release segment - (?P<pre> # pre-release - [-_\.]? - (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview)) - [-_\.]? - (?P<pre_n>[0-9]+)? - )? - (?P<post> # post release - (?:-(?P<post_n1>[0-9]+)) - | - (?: - [-_\.]? - (?P<post_l>post|rev|r) - [-_\.]? - (?P<post_n2>[0-9]+)? - ) - )? - (?P<dev> # dev release - [-_\.]? - (?P<dev_l>dev) - [-_\.]? - (?P<dev_n>[0-9]+)? - )? - ) - (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))? 
# local version -""" - - -class Version(_BaseVersion): - - _regex = re.compile( - r"^\s*" + VERSION_PATTERN + r"\s*$", - re.VERBOSE | re.IGNORECASE, - ) - - def __init__(self, version): - # Validate the version and parse it into pieces - match = self._regex.search(version) - if not match: - raise InvalidVersion("Invalid version: '{0}'".format(version)) - - # Store the parsed out pieces of the version - self._version = _Version( - epoch=int(match.group("epoch")) if match.group("epoch") else 0, - release=tuple(int(i) for i in match.group("release").split(".")), - pre=_parse_letter_version( - match.group("pre_l"), - match.group("pre_n"), - ), - post=_parse_letter_version( - match.group("post_l"), - match.group("post_n1") or match.group("post_n2"), - ), - dev=_parse_letter_version( - match.group("dev_l"), - match.group("dev_n"), - ), - local=_parse_local_version(match.group("local")), - ) - - # Generate a key which will be used for sorting - self._key = _cmpkey( - self._version.epoch, - self._version.release, - self._version.pre, - self._version.post, - self._version.dev, - self._version.local, - ) - - def __repr__(self): - return "<Version({0})>".format(repr(str(self))) - - def __str__(self): - parts = [] - - # Epoch - if self._version.epoch != 0: - parts.append("{0}!".format(self._version.epoch)) - - # Release segment - parts.append(".".join(str(x) for x in self._version.release)) - - # Pre-release - if self._version.pre is not None: - parts.append("".join(str(x) for x in self._version.pre)) - - # Post-release - if self._version.post is not None: - parts.append(".post{0}".format(self._version.post[1])) - - # Development release - if self._version.dev is not None: - parts.append(".dev{0}".format(self._version.dev[1])) - - # Local version segment - if self._version.local is not None: - parts.append( - "+{0}".format(".".join(str(x) for x in self._version.local)) - ) - - return "".join(parts) - - @property - def public(self): - return str(self).split("+", 1)[0] - - @property - def base_version(self): - parts = [] - - # Epoch - if self._version.epoch != 0: - parts.append("{0}!".format(self._version.epoch)) - - # Release segment - parts.append(".".join(str(x) for x in self._version.release)) - - return "".join(parts) - - @property - def local(self): - version_string = str(self) - if "+" in version_string: - return version_string.split("+", 1)[1] - - @property - def is_prerelease(self): - return bool(self._version.dev or self._version.pre) - - @property - def is_postrelease(self): - return bool(self._version.post) - - -def _parse_letter_version(letter, number): - if letter: - # We consider there to be an implicit 0 in a pre-release if there is - # not a numeral associated with it. - if number is None: - number = 0 - - # We normalize any letters to their lower case form - letter = letter.lower() - - # We consider some words to be alternate spellings of other words and - # in those cases we want to normalize the spellings to our preferred - # spelling. - if letter == "alpha": - letter = "a" - elif letter == "beta": - letter = "b" - elif letter in ["c", "pre", "preview"]: - letter = "rc" - elif letter in ["rev", "r"]: - letter = "post" - - return letter, int(number) - if not letter and number: - # We assume if we are given a number, but we are not given a letter - # then this is using the implicit post release syntax (e.g. 
1.0-1) - letter = "post" - - return letter, int(number) - - -_local_version_seperators = re.compile(r"[\._-]") - - -def _parse_local_version(local): - """ - Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve"). - """ - if local is not None: - return tuple( - part.lower() if not part.isdigit() else int(part) - for part in _local_version_seperators.split(local) - ) - - -def _cmpkey(epoch, release, pre, post, dev, local): - # When we compare a release version, we want to compare it with all of the - # trailing zeros removed. So we'll use a reverse the list, drop all the now - # leading zeros until we come to something non zero, then take the rest - # re-reverse it back into the correct order and make it a tuple and use - # that for our sorting key. - release = tuple( - reversed(list( - itertools.dropwhile( - lambda x: x == 0, - reversed(release), - ) - )) - ) - - # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0. - # We'll do this by abusing the pre segment, but we _only_ want to do this - # if there is not a pre or a post segment. If we have one of those then - # the normal sorting rules will handle this case correctly. - if pre is None and post is None and dev is not None: - pre = -Infinity - # Versions without a pre-release (except as noted above) should sort after - # those with one. - elif pre is None: - pre = Infinity - - # Versions without a post segment should sort before those with one. - if post is None: - post = -Infinity - - # Versions without a development segment should sort after those with one. - if dev is None: - dev = Infinity - - if local is None: - # Versions without a local segment should sort before those with one. - local = -Infinity - else: - # Versions with a local segment need that segment parsed to implement - # the sorting rules in PEP440. - # - Alpha numeric segments sort before numeric segments - # - Alpha numeric segments sort lexicographically - # - Numeric segments sort numerically - # - Shorter versions sort before longer versions when the prefixes - # match exactly - local = tuple( - (i, "") if isinstance(i, int) else (-Infinity, i) - for i in local - ) - - return epoch, release, pre, post, dev, local diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/pyparsing.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/pyparsing.py deleted file mode 100644 index a212243..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/pyparsing.py +++ /dev/null @@ -1,5696 +0,0 @@ -# module pyparsing.py -# -# Copyright (c) 2003-2016 Paul T. McGuire -# -# Permission is hereby granted, free of charge, to any person obtaining -# a copy of this software and associated documentation files (the -# "Software"), to deal in the Software without restriction, including -# without limitation the rights to use, copy, modify, merge, publish, -# distribute, sublicense, and/or sell copies of the Software, and to -# permit persons to whom the Software is furnished to do so, subject to -# the following conditions: -# -# The above copyright notice and this permission notice shall be -# included in all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
-# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -# - -__doc__ = \ -""" -pyparsing module - Classes and methods to define and execute parsing grammars - -The pyparsing module is an alternative approach to creating and executing simple grammars, -vs. the traditional lex/yacc approach, or the use of regular expressions. With pyparsing, you -don't need to learn a new syntax for defining grammars or matching expressions - the parsing module -provides a library of classes that you use to construct the grammar directly in Python. - -Here is a program to parse "Hello, World!" (or any greeting of the form -C{"<salutation>, <addressee>!"}), built up using L{Word}, L{Literal}, and L{And} elements -(L{'+'<ParserElement.__add__>} operator gives L{And} expressions, strings are auto-converted to -L{Literal} expressions):: - - from pyparsing import Word, alphas - - # define grammar of a greeting - greet = Word(alphas) + "," + Word(alphas) + "!" - - hello = "Hello, World!" - print (hello, "->", greet.parseString(hello)) - -The program outputs the following:: - - Hello, World! -> ['Hello', ',', 'World', '!'] - -The Python representation of the grammar is quite readable, owing to the self-explanatory -class names, and the use of '+', '|' and '^' operators. - -The L{ParseResults} object returned from L{ParserElement.parseString<ParserElement.parseString>} can be accessed as a nested list, a dictionary, or an -object with named attributes. - -The pyparsing module handles some of the problems that are typically vexing when writing text parsers: - - extra or missing whitespace (the above program will also handle "Hello,World!", "Hello , World !", etc.) 
- - quoted strings - - embedded comments -""" - -__version__ = "2.1.10" -__versionTime__ = "07 Oct 2016 01:31 UTC" -__author__ = "Paul McGuire <ptmcg@users.sourceforge.net>" - -import string -from weakref import ref as wkref -import copy -import sys -import warnings -import re -import sre_constants -import collections -import pprint -import traceback -import types -from datetime import datetime - -try: - from _thread import RLock -except ImportError: - from threading import RLock - -try: - from collections import OrderedDict as _OrderedDict -except ImportError: - try: - from ordereddict import OrderedDict as _OrderedDict - except ImportError: - _OrderedDict = None - -#~ sys.stderr.write( "testing pyparsing module, version %s, %s\n" % (__version__,__versionTime__ ) ) - -__all__ = [ -'And', 'CaselessKeyword', 'CaselessLiteral', 'CharsNotIn', 'Combine', 'Dict', 'Each', 'Empty', -'FollowedBy', 'Forward', 'GoToColumn', 'Group', 'Keyword', 'LineEnd', 'LineStart', 'Literal', -'MatchFirst', 'NoMatch', 'NotAny', 'OneOrMore', 'OnlyOnce', 'Optional', 'Or', -'ParseBaseException', 'ParseElementEnhance', 'ParseException', 'ParseExpression', 'ParseFatalException', -'ParseResults', 'ParseSyntaxException', 'ParserElement', 'QuotedString', 'RecursiveGrammarException', -'Regex', 'SkipTo', 'StringEnd', 'StringStart', 'Suppress', 'Token', 'TokenConverter', -'White', 'Word', 'WordEnd', 'WordStart', 'ZeroOrMore', -'alphanums', 'alphas', 'alphas8bit', 'anyCloseTag', 'anyOpenTag', 'cStyleComment', 'col', -'commaSeparatedList', 'commonHTMLEntity', 'countedArray', 'cppStyleComment', 'dblQuotedString', -'dblSlashComment', 'delimitedList', 'dictOf', 'downcaseTokens', 'empty', 'hexnums', -'htmlComment', 'javaStyleComment', 'line', 'lineEnd', 'lineStart', 'lineno', -'makeHTMLTags', 'makeXMLTags', 'matchOnlyAtCol', 'matchPreviousExpr', 'matchPreviousLiteral', -'nestedExpr', 'nullDebugAction', 'nums', 'oneOf', 'opAssoc', 'operatorPrecedence', 'printables', -'punc8bit', 'pythonStyleComment', 'quotedString', 'removeQuotes', 'replaceHTMLEntity', -'replaceWith', 'restOfLine', 'sglQuotedString', 'srange', 'stringEnd', -'stringStart', 'traceParseAction', 'unicodeString', 'upcaseTokens', 'withAttribute', -'indentedBlock', 'originalTextFor', 'ungroup', 'infixNotation','locatedExpr', 'withClass', -'CloseMatch', 'tokenMap', 'pyparsing_common', -] - -system_version = tuple(sys.version_info)[:3] -PY_3 = system_version[0] == 3 -if PY_3: - _MAX_INT = sys.maxsize - basestring = str - unichr = chr - _ustr = str - - # build list of single arg builtins, that can be used as parse actions - singleArgBuiltins = [sum, len, sorted, reversed, list, tuple, set, any, all, min, max] - -else: - _MAX_INT = sys.maxint - range = xrange - - def _ustr(obj): - """Drop-in replacement for str(obj) that tries to be Unicode friendly. It first tries - str(obj). If that fails with a UnicodeEncodeError, then it tries unicode(obj). It - then < returns the unicode object | encodes it with the default encoding | ... >. - """ - if isinstance(obj,unicode): - return obj - - try: - # If this works, then _ustr(obj) has the same behaviour as str(obj), so - # it won't break any existing code. 
- return str(obj) - - except UnicodeEncodeError: - # Else encode it - ret = unicode(obj).encode(sys.getdefaultencoding(), 'xmlcharrefreplace') - xmlcharref = Regex('&#\d+;') - xmlcharref.setParseAction(lambda t: '\\u' + hex(int(t[0][2:-1]))[2:]) - return xmlcharref.transformString(ret) - - # build list of single arg builtins, tolerant of Python version, that can be used as parse actions - singleArgBuiltins = [] - import __builtin__ - for fname in "sum len sorted reversed list tuple set any all min max".split(): - try: - singleArgBuiltins.append(getattr(__builtin__,fname)) - except AttributeError: - continue - -_generatorType = type((y for y in range(1))) - -def _xml_escape(data): - """Escape &, <, >, ", ', etc. in a string of data.""" - - # ampersand must be replaced first - from_symbols = '&><"\'' - to_symbols = ('&'+s+';' for s in "amp gt lt quot apos".split()) - for from_,to_ in zip(from_symbols, to_symbols): - data = data.replace(from_, to_) - return data - -class _Constants(object): - pass - -alphas = string.ascii_uppercase + string.ascii_lowercase -nums = "0123456789" -hexnums = nums + "ABCDEFabcdef" -alphanums = alphas + nums -_bslash = chr(92) -printables = "".join(c for c in string.printable if c not in string.whitespace) - -class ParseBaseException(Exception): - """base exception class for all parsing runtime exceptions""" - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( self, pstr, loc=0, msg=None, elem=None ): - self.loc = loc - if msg is None: - self.msg = pstr - self.pstr = "" - else: - self.msg = msg - self.pstr = pstr - self.parserElement = elem - self.args = (pstr, loc, msg) - - @classmethod - def _from_exception(cls, pe): - """ - internal factory method to simplify creating one type of ParseException - from another - avoids having __init__ signature conflicts among subclasses - """ - return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) - - def __getattr__( self, aname ): - """supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - """ - if( aname == "lineno" ): - return lineno( self.loc, self.pstr ) - elif( aname in ("col", "column") ): - return col( self.loc, self.pstr ) - elif( aname == "line" ): - return line( self.loc, self.pstr ) - else: - raise AttributeError(aname) - - def __str__( self ): - return "%s (at char %d), (line:%d, col:%d)" % \ - ( self.msg, self.loc, self.lineno, self.column ) - def __repr__( self ): - return _ustr(self) - def markInputline( self, markerString = ">!<" ): - """Extracts the exception line from the input string, and marks - the location of the exception with a special symbol. 
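The exception attributes documented above are easiest to see in action; a minimal sketch using this module's public names, with expected output shown as comments::

    from pyparsing import Word, nums, ParseException

    try:
        Word(nums).setName("integer").parseString("AB12")
    except ParseException as pe:
        print(pe.lineno, pe.col)   # 1 1
        print(pe.markInputline())  # >!<AB12
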
- """ - line_str = self.line - line_column = self.column - 1 - if markerString: - line_str = "".join((line_str[:line_column], - markerString, line_str[line_column:])) - return line_str.strip() - def __dir__(self): - return "lineno col line".split() + dir(type(self)) - -class ParseException(ParseBaseException): - """ - Exception thrown when parse expressions don't match class; - supported attributes by name are: - - lineno - returns the line number of the exception text - - col - returns the column number of the exception text - - line - returns the line containing the exception text - - Example:: - try: - Word(nums).setName("integer").parseString("ABC") - except ParseException as pe: - print(pe) - print("column: {}".format(pe.col)) - - prints:: - Expected integer (at char 0), (line:1, col:1) - column: 1 - """ - pass - -class ParseFatalException(ParseBaseException): - """user-throwable exception thrown when inconsistent parse content - is found; stops all parsing immediately""" - pass - -class ParseSyntaxException(ParseFatalException): - """just like L{ParseFatalException}, but thrown internally when an - L{ErrorStop<And._ErrorStop>} ('-' operator) indicates that parsing is to stop - immediately because an unbacktrackable syntax error has been found""" - pass - -#~ class ReparseException(ParseBaseException): - #~ """Experimental class - parse actions can raise this exception to cause - #~ pyparsing to reparse the input string: - #~ - with a modified input string, and/or - #~ - with a modified start location - #~ Set the values of the ReparseException in the constructor, and raise the - #~ exception in a parse action to cause pyparsing to use the new string/location. - #~ Setting the values as None causes no change to be made. - #~ """ - #~ def __init_( self, newstring, restartLoc ): - #~ self.newParseText = newstring - #~ self.reparseLoc = restartLoc - -class RecursiveGrammarException(Exception): - """exception thrown by L{ParserElement.validate} if the grammar could be improperly recursive""" - def __init__( self, parseElementList ): - self.parseElementTrace = parseElementList - - def __str__( self ): - return "RecursiveGrammarException: %s" % self.parseElementTrace - -class _ParseResultsWithOffset(object): - def __init__(self,p1,p2): - self.tup = (p1,p2) - def __getitem__(self,i): - return self.tup[i] - def __repr__(self): - return repr(self.tup[0]) - def setOffset(self,i): - self.tup = (self.tup[0],i) - -class ParseResults(object): - """ - Structured parse results, to provide multiple means of access to the parsed data: - - as a list (C{len(results)}) - - by list index (C{results[0], results[1]}, etc.) 
- - by attribute (C{results.<resultsName>} - see L{ParserElement.setResultsName}) - - Example:: - integer = Word(nums) - date_str = (integer.setResultsName("year") + '/' - + integer.setResultsName("month") + '/' - + integer.setResultsName("day")) - # equivalent form: - # date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - # parseString returns a ParseResults object - result = date_str.parseString("1999/12/31") - - def test(s, fn=repr): - print("%s -> %s" % (s, fn(eval(s)))) - test("list(result)") - test("result[0]") - test("result['month']") - test("result.day") - test("'month' in result") - test("'minutes' in result") - test("result.dump()", str) - prints:: - list(result) -> ['1999', '/', '12', '/', '31'] - result[0] -> '1999' - result['month'] -> '12' - result.day -> '31' - 'month' in result -> True - 'minutes' in result -> False - result.dump() -> ['1999', '/', '12', '/', '31'] - - day: 31 - - month: 12 - - year: 1999 - """ - def __new__(cls, toklist=None, name=None, asList=True, modal=True ): - if isinstance(toklist, cls): - return toklist - retobj = object.__new__(cls) - retobj.__doinit = True - return retobj - - # Performance tuning: we construct a *lot* of these, so keep this - # constructor as small and fast as possible - def __init__( self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance ): - if self.__doinit: - self.__doinit = False - self.__name = None - self.__parent = None - self.__accumNames = {} - self.__asList = asList - self.__modal = modal - if toklist is None: - toklist = [] - if isinstance(toklist, list): - self.__toklist = toklist[:] - elif isinstance(toklist, _generatorType): - self.__toklist = list(toklist) - else: - self.__toklist = [toklist] - self.__tokdict = dict() - - if name is not None and name: - if not modal: - self.__accumNames[name] = 0 - if isinstance(name,int): - name = _ustr(name) # will always return a str, but use _ustr for consistency - self.__name = name - if not (isinstance(toklist, (type(None), basestring, list)) and toklist in (None,'',[])): - if isinstance(toklist,basestring): - toklist = [ toklist ] - if asList: - if isinstance(toklist,ParseResults): - self[name] = _ParseResultsWithOffset(toklist.copy(),0) - else: - self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]),0) - self[name].__name = name - else: - try: - self[name] = toklist[0] - except (KeyError,TypeError,IndexError): - self[name] = toklist - - def __getitem__( self, i ): - if isinstance( i, (int,slice) ): - return self.__toklist[i] - else: - if i not in self.__accumNames: - return self.__tokdict[i][-1][0] - else: - return ParseResults([ v[0] for v in self.__tokdict[i] ]) - - def __setitem__( self, k, v, isinstance=isinstance ): - if isinstance(v,_ParseResultsWithOffset): - self.__tokdict[k] = self.__tokdict.get(k,list()) + [v] - sub = v[0] - elif isinstance(k,(int,slice)): - self.__toklist[k] = v - sub = v - else: - self.__tokdict[k] = self.__tokdict.get(k,list()) + [_ParseResultsWithOffset(v,0)] - sub = v - if isinstance(sub,ParseResults): - sub.__parent = wkref(self) - - def __delitem__( self, i ): - if isinstance(i,(int,slice)): - mylen = len( self.__toklist ) - del self.__toklist[i] - - # convert int to slice - if isinstance(i, int): - if i < 0: - i += mylen - i = slice(i, i+1) - # get removed indices - removed = list(range(*i.indices(mylen))) - removed.reverse() - # fixup indices in token dictionary - for name,occurrences in self.__tokdict.items(): - for j in removed: - for k, (value, position) in 
enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position - (position > j)) - else: - del self.__tokdict[i] - - def __contains__( self, k ): - return k in self.__tokdict - - def __len__( self ): return len( self.__toklist ) - def __bool__(self): return ( not not self.__toklist ) - __nonzero__ = __bool__ - def __iter__( self ): return iter( self.__toklist ) - def __reversed__( self ): return iter( self.__toklist[::-1] ) - def _iterkeys( self ): - if hasattr(self.__tokdict, "iterkeys"): - return self.__tokdict.iterkeys() - else: - return iter(self.__tokdict) - - def _itervalues( self ): - return (self[k] for k in self._iterkeys()) - - def _iteritems( self ): - return ((k, self[k]) for k in self._iterkeys()) - - if PY_3: - keys = _iterkeys - """Returns an iterator of all named result keys (Python 3.x only).""" - - values = _itervalues - """Returns an iterator of all named result values (Python 3.x only).""" - - items = _iteritems - """Returns an iterator of all named result key-value tuples (Python 3.x only).""" - - else: - iterkeys = _iterkeys - """Returns an iterator of all named result keys (Python 2.x only).""" - - itervalues = _itervalues - """Returns an iterator of all named result values (Python 2.x only).""" - - iteritems = _iteritems - """Returns an iterator of all named result key-value tuples (Python 2.x only).""" - - def keys( self ): - """Returns all named result keys (as a list in Python 2.x, as an iterator in Python 3.x).""" - return list(self.iterkeys()) - - def values( self ): - """Returns all named result values (as a list in Python 2.x, as an iterator in Python 3.x).""" - return list(self.itervalues()) - - def items( self ): - """Returns all named result key-values (as a list of tuples in Python 2.x, as an iterator in Python 3.x).""" - return list(self.iteritems()) - - def haskeys( self ): - """Since keys() returns an iterator, this method is helpful in bypassing - code that looks for the existence of any defined results names.""" - return bool(self.__tokdict) - - def pop( self, *args, **kwargs): - """ - Removes and returns item at specified index (default=C{last}). - Supports both C{list} and C{dict} semantics for C{pop()}. If passed no - argument or an integer argument, it will use C{list} semantics - and pop tokens from the list of parsed tokens. If passed a - non-integer argument (most likely a string), it will use C{dict} - semantics and pop the corresponding value from any defined - results names. A second default return value argument is - supported, just as in C{dict.pop()}. 
- - Example:: - def remove_first(tokens): - tokens.pop(0) - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - print(OneOrMore(Word(nums)).addParseAction(remove_first).parseString("0 123 321")) # -> ['123', '321'] - - label = Word(alphas) - patt = label("LABEL") + OneOrMore(Word(nums)) - print(patt.parseString("AAB 123 321").dump()) - - # Use pop() in a parse action to remove named result (note that corresponding value is not - # removed from list form of results) - def remove_LABEL(tokens): - tokens.pop("LABEL") - return tokens - patt.addParseAction(remove_LABEL) - print(patt.parseString("AAB 123 321").dump()) - prints:: - ['AAB', '123', '321'] - - LABEL: AAB - - ['AAB', '123', '321'] - """ - if not args: - args = [-1] - for k,v in kwargs.items(): - if k == 'default': - args = (args[0], v) - else: - raise TypeError("pop() got an unexpected keyword argument '%s'" % k) - if (isinstance(args[0], int) or - len(args) == 1 or - args[0] in self): - index = args[0] - ret = self[index] - del self[index] - return ret - else: - defaultvalue = args[1] - return defaultvalue - - def get(self, key, defaultValue=None): - """ - Returns named result matching the given key, or if there is no - such name, then returns the given C{defaultValue} or C{None} if no - C{defaultValue} is specified. - - Similar to C{dict.get()}. - - Example:: - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString("1999/12/31") - print(result.get("year")) # -> '1999' - print(result.get("hour", "not specified")) # -> 'not specified' - print(result.get("hour")) # -> None - """ - if key in self: - return self[key] - else: - return defaultValue - - def insert( self, index, insStr ): - """ - Inserts new element at location index in the list of parsed tokens. - - Similar to C{list.insert()}. - - Example:: - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to insert the parse location in the front of the parsed results - def insert_locn(locn, tokens): - tokens.insert(0, locn) - print(OneOrMore(Word(nums)).addParseAction(insert_locn).parseString("0 123 321")) # -> [0, '0', '123', '321'] - """ - self.__toklist.insert(index, insStr) - # fixup indices in token dictionary - for name,occurrences in self.__tokdict.items(): - for k, (value, position) in enumerate(occurrences): - occurrences[k] = _ParseResultsWithOffset(value, position + (position > index)) - - def append( self, item ): - """ - Add single element to end of ParseResults list of elements. - - Example:: - print(OneOrMore(Word(nums)).parseString("0 123 321")) # -> ['0', '123', '321'] - - # use a parse action to compute the sum of the parsed integers, and add it to the end - def append_sum(tokens): - tokens.append(sum(map(int, tokens))) - print(OneOrMore(Word(nums)).addParseAction(append_sum).parseString("0 123 321")) # -> ['0', '123', '321', 444] - """ - self.__toklist.append(item) - - def extend( self, itemseq ): - """ - Add sequence of elements to end of ParseResults list of elements. 
- - Example:: - patt = OneOrMore(Word(alphas)) - - # use a parse action to append the reverse of the matched strings, to make a palindrome - def make_palindrome(tokens): - tokens.extend(reversed([t[::-1] for t in tokens])) - return ''.join(tokens) - print(patt.addParseAction(make_palindrome).parseString("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' - """ - if isinstance(itemseq, ParseResults): - self += itemseq - else: - self.__toklist.extend(itemseq) - - def clear( self ): - """ - Clear all elements and results names. - """ - del self.__toklist[:] - self.__tokdict.clear() - - def __getattr__( self, name ): - try: - return self[name] - except KeyError: - return "" - - if name in self.__tokdict: - if name not in self.__accumNames: - return self.__tokdict[name][-1][0] - else: - return ParseResults([ v[0] for v in self.__tokdict[name] ]) - else: - return "" - - def __add__( self, other ): - ret = self.copy() - ret += other - return ret - - def __iadd__( self, other ): - if other.__tokdict: - offset = len(self.__toklist) - addoffset = lambda a: offset if a<0 else a+offset - otheritems = other.__tokdict.items() - otherdictitems = [(k, _ParseResultsWithOffset(v[0],addoffset(v[1])) ) - for (k,vlist) in otheritems for v in vlist] - for k,v in otherdictitems: - self[k] = v - if isinstance(v[0],ParseResults): - v[0].__parent = wkref(self) - - self.__toklist += other.__toklist - self.__accumNames.update( other.__accumNames ) - return self - - def __radd__(self, other): - if isinstance(other,int) and other == 0: - # useful for merging many ParseResults using sum() builtin - return self.copy() - else: - # this may raise a TypeError - so be it - return other + self - - def __repr__( self ): - return "(%s, %s)" % ( repr( self.__toklist ), repr( self.__tokdict ) ) - - def __str__( self ): - return '[' + ', '.join(_ustr(i) if isinstance(i, ParseResults) else repr(i) for i in self.__toklist) + ']' - - def _asStringList( self, sep='' ): - out = [] - for item in self.__toklist: - if out and sep: - out.append(sep) - if isinstance( item, ParseResults ): - out += item._asStringList() - else: - out.append( _ustr(item) ) - return out - - def asList( self ): - """ - Returns the parse results as a nested list of matching tokens, all converted to strings. - - Example:: - patt = OneOrMore(Word(alphas)) - result = patt.parseString("sldkj lsdkj sldkj") - # even though the result prints in string-like form, it is actually a pyparsing ParseResults - print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj'] - - # Use asList() to create an actual list - result_list = result.asList() - print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj'] - """ - return [res.asList() if isinstance(res,ParseResults) else res for res in self.__toklist] - - def asDict( self ): - """ - Returns the named parse results as a nested dictionary. 
- - Example:: - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString('12/31/1999') - print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) - - result_dict = result.asDict() - print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'} - - # even though a ParseResults supports dict-like access, sometime you just need to have a dict - import json - print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable - print(json.dumps(result.asDict())) # -> {"month": "31", "day": "1999", "year": "12"} - """ - if PY_3: - item_fn = self.items - else: - item_fn = self.iteritems - - def toItem(obj): - if isinstance(obj, ParseResults): - if obj.haskeys(): - return obj.asDict() - else: - return [toItem(v) for v in obj] - else: - return obj - - return dict((k,toItem(v)) for k,v in item_fn()) - - def copy( self ): - """ - Returns a new copy of a C{ParseResults} object. - """ - ret = ParseResults( self.__toklist ) - ret.__tokdict = self.__tokdict.copy() - ret.__parent = self.__parent - ret.__accumNames.update( self.__accumNames ) - ret.__name = self.__name - return ret - - def asXML( self, doctag=None, namedItemsOnly=False, indent="", formatted=True ): - """ - (Deprecated) Returns the parse results as XML. Tags are created for tokens and lists that have defined results names. - """ - nl = "\n" - out = [] - namedItems = dict((v[1],k) for (k,vlist) in self.__tokdict.items() - for v in vlist) - nextLevelIndent = indent + " " - - # collapse out indents if formatting is not desired - if not formatted: - indent = "" - nextLevelIndent = "" - nl = "" - - selfTag = None - if doctag is not None: - selfTag = doctag - else: - if self.__name: - selfTag = self.__name - - if not selfTag: - if namedItemsOnly: - return "" - else: - selfTag = "ITEM" - - out += [ nl, indent, "<", selfTag, ">" ] - - for i,res in enumerate(self.__toklist): - if isinstance(res,ParseResults): - if i in namedItems: - out += [ res.asXML(namedItems[i], - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - out += [ res.asXML(None, - namedItemsOnly and doctag is None, - nextLevelIndent, - formatted)] - else: - # individual token, see if there is a name for it - resTag = None - if i in namedItems: - resTag = namedItems[i] - if not resTag: - if namedItemsOnly: - continue - else: - resTag = "ITEM" - xmlBodyText = _xml_escape(_ustr(res)) - out += [ nl, nextLevelIndent, "<", resTag, ">", - xmlBodyText, - "</", resTag, ">" ] - - out += [ nl, indent, "</", selfTag, ">" ] - return "".join(out) - - def __lookup(self,sub): - for k,vlist in self.__tokdict.items(): - for v,loc in vlist: - if sub is v: - return k - return None - - def getName(self): - """ - Returns the results name for this token expression. Useful when several - different expressions might match at a particular location. 
- - Example:: - integer = Word(nums) - ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") - house_number_expr = Suppress('#') + Word(nums, alphanums) - user_data = (Group(house_number_expr)("house_number") - | Group(ssn_expr)("ssn") - | Group(integer)("age")) - user_info = OneOrMore(user_data) - - result = user_info.parseString("22 111-22-3333 #221B") - for item in result: - print(item.getName(), ':', item[0]) - prints:: - age : 22 - ssn : 111-22-3333 - house_number : 221B - """ - if self.__name: - return self.__name - elif self.__parent: - par = self.__parent() - if par: - return par.__lookup(self) - else: - return None - elif (len(self) == 1 and - len(self.__tokdict) == 1 and - next(iter(self.__tokdict.values()))[0][1] in (0,-1)): - return next(iter(self.__tokdict.keys())) - else: - return None - - def dump(self, indent='', depth=0, full=True): - """ - Diagnostic method for listing out the contents of a C{ParseResults}. - Accepts an optional C{indent} argument so that this string can be embedded - in a nested display of other data. - - Example:: - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - result = date_str.parseString('12/31/1999') - print(result.dump()) - prints:: - ['12', '/', '31', '/', '1999'] - - day: 1999 - - month: 31 - - year: 12 - """ - out = [] - NL = '\n' - out.append( indent+_ustr(self.asList()) ) - if full: - if self.haskeys(): - items = sorted((str(k), v) for k,v in self.items()) - for k,v in items: - if out: - out.append(NL) - out.append( "%s%s- %s: " % (indent,(' '*depth), k) ) - if isinstance(v,ParseResults): - if v: - out.append( v.dump(indent,depth+1) ) - else: - out.append(_ustr(v)) - else: - out.append(repr(v)) - elif any(isinstance(vv,ParseResults) for vv in self): - v = self - for i,vv in enumerate(v): - if isinstance(vv,ParseResults): - out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),vv.dump(indent,depth+1) )) - else: - out.append("\n%s%s[%d]:\n%s%s%s" % (indent,(' '*(depth)),i,indent,(' '*(depth+1)),_ustr(vv))) - - return "".join(out) - - def pprint(self, *args, **kwargs): - """ - Pretty-printer for parsed results as a list, using the C{pprint} module. - Accepts additional positional or keyword args as defined for the - C{pprint.pprint} method. 
(U{http://docs.python.org/3/library/pprint.html#pprint.pprint}) - - Example:: - ident = Word(alphas, alphanums) - num = Word(nums) - func = Forward() - term = ident | num | Group('(' + func + ')') - func <<= ident + Group(Optional(delimitedList(term))) - result = func.parseString("fna a,b,(fnb c,d,200),100") - result.pprint(width=40) - prints:: - ['fna', - ['a', - 'b', - ['(', 'fnb', ['c', 'd', '200'], ')'], - '100']] - """ - pprint.pprint(self.asList(), *args, **kwargs) - - # add support for pickle protocol - def __getstate__(self): - return ( self.__toklist, - ( self.__tokdict.copy(), - self.__parent is not None and self.__parent() or None, - self.__accumNames, - self.__name ) ) - - def __setstate__(self,state): - self.__toklist = state[0] - (self.__tokdict, - par, - inAccumNames, - self.__name) = state[1] - self.__accumNames = {} - self.__accumNames.update(inAccumNames) - if par is not None: - self.__parent = wkref(par) - else: - self.__parent = None - - def __getnewargs__(self): - return self.__toklist, self.__name, self.__asList, self.__modal - - def __dir__(self): - return (dir(type(self)) + list(self.keys())) - -collections.MutableMapping.register(ParseResults) - -def col (loc,strg): - """Returns current column within a string, counting newlines as line separators. - The first column is number 1. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information - on parsing strings containing C{<TAB>}s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - """ - s = strg - return 1 if 0<loc<len(s) and s[loc-1] == '\n' else loc - s.rfind("\n", 0, loc) - -def lineno(loc,strg): - """Returns current line number within a string, counting newlines as line separators. - The first line is number 1. - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{ParserElement.parseString}<ParserElement.parseString>} for more information - on parsing strings containing C{<TAB>}s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - """ - return strg.count("\n",0,loc) + 1 - -def line( loc, strg ): - """Returns the line of text containing loc within a string, counting newlines as line separators. 
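# A small usage sketch for the three location helpers defined here (col, lineno,
# line), assuming they are imported from pyparsing; all counts are 1-based.
from pyparsing import col, lineno, line

src = "abc\ndef\nghi"
loc = src.find("e")          # character offset 5, on the second line
print(lineno(loc, src))      # -> 2
print(col(loc, src))         # -> 2
print(line(loc, src))        # -> def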
- """ - lastCR = strg.rfind("\n", 0, loc) - nextCR = strg.find("\n", loc) - if nextCR >= 0: - return strg[lastCR+1:nextCR] - else: - return strg[lastCR+1:] - -def _defaultStartDebugAction( instring, loc, expr ): - print (("Match " + _ustr(expr) + " at loc " + _ustr(loc) + "(%d,%d)" % ( lineno(loc,instring), col(loc,instring) ))) - -def _defaultSuccessDebugAction( instring, startloc, endloc, expr, toks ): - print ("Matched " + _ustr(expr) + " -> " + str(toks.asList())) - -def _defaultExceptionDebugAction( instring, loc, expr, exc ): - print ("Exception raised:" + _ustr(exc)) - -def nullDebugAction(*args): - """'Do-nothing' debug action, to suppress debugging output during parsing.""" - pass - -# Only works on Python 3.x - nonlocal is toxic to Python 2 installs -#~ 'decorator to trim function calls to match the arity of the target' -#~ def _trim_arity(func, maxargs=3): - #~ if func in singleArgBuiltins: - #~ return lambda s,l,t: func(t) - #~ limit = 0 - #~ foundArity = False - #~ def wrapper(*args): - #~ nonlocal limit,foundArity - #~ while 1: - #~ try: - #~ ret = func(*args[limit:]) - #~ foundArity = True - #~ return ret - #~ except TypeError: - #~ if limit == maxargs or foundArity: - #~ raise - #~ limit += 1 - #~ continue - #~ return wrapper - -# this version is Python 2.x-3.x cross-compatible -'decorator to trim function calls to match the arity of the target' -def _trim_arity(func, maxargs=2): - if func in singleArgBuiltins: - return lambda s,l,t: func(t) - limit = [0] - foundArity = [False] - - # traceback return data structure changed in Py3.5 - normalize back to plain tuples - if system_version[:2] >= (3,5): - def extract_stack(limit=0): - # special handling for Python 3.5.0 - extra deep call stack by 1 - offset = -3 if system_version == (3,5,0) else -2 - frame_summary = traceback.extract_stack(limit=-offset+limit-1)[offset] - return [(frame_summary.filename, frame_summary.lineno)] - def extract_tb(tb, limit=0): - frames = traceback.extract_tb(tb, limit=limit) - frame_summary = frames[-1] - return [(frame_summary.filename, frame_summary.lineno)] - else: - extract_stack = traceback.extract_stack - extract_tb = traceback.extract_tb - - # synthesize what would be returned by traceback.extract_stack at the call to - # user's parse action 'func', so that we don't incur call penalty at parse time - - LINE_DIFF = 6 - # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND - # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! 
- this_line = extract_stack(limit=2)[-1] - pa_call_line_synth = (this_line[0], this_line[1]+LINE_DIFF) - - def wrapper(*args): - while 1: - try: - ret = func(*args[limit[0]:]) - foundArity[0] = True - return ret - except TypeError: - # re-raise TypeErrors if they did not come from our arity testing - if foundArity[0]: - raise - else: - try: - tb = sys.exc_info()[-1] - if not extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth: - raise - finally: - del tb - - if limit[0] <= maxargs: - limit[0] += 1 - continue - raise - - # copy func name to wrapper for sensible debug output - func_name = "<parse action>" - try: - func_name = getattr(func, '__name__', - getattr(func, '__class__').__name__) - except Exception: - func_name = str(func) - wrapper.__name__ = func_name - - return wrapper - -class ParserElement(object): - """Abstract base level parser element class.""" - DEFAULT_WHITE_CHARS = " \n\t\r" - verbose_stacktrace = False - - @staticmethod - def setDefaultWhitespaceChars( chars ): - r""" - Overrides the default whitespace chars - - Example:: - # default whitespace chars are space, <TAB> and newline - OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] - - # change to just treat newline as significant - ParserElement.setDefaultWhitespaceChars(" \t") - OneOrMore(Word(alphas)).parseString("abc def\nghi jkl") # -> ['abc', 'def'] - """ - ParserElement.DEFAULT_WHITE_CHARS = chars - - @staticmethod - def inlineLiteralsUsing(cls): - """ - Set class to be used for inclusion of string literals into a parser. - - Example:: - # default literal class used is Literal - integer = Word(nums) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - - # change to Suppress - ParserElement.inlineLiteralsUsing(Suppress) - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - - date_str.parseString("1999/12/31") # -> ['1999', '12', '31'] - """ - ParserElement._literalStringClass = cls - - def __init__( self, savelist=False ): - self.parseAction = list() - self.failAction = None - #~ self.name = "<unknown>" # don't define self.name, let subclasses try/except upcall - self.strRepr = None - self.resultsName = None - self.saveAsList = savelist - self.skipWhitespace = True - self.whiteChars = ParserElement.DEFAULT_WHITE_CHARS - self.copyDefaultWhiteChars = True - self.mayReturnEmpty = False # used when checking for left-recursion - self.keepTabs = False - self.ignoreExprs = list() - self.debug = False - self.streamlined = False - self.mayIndexError = True # used to optimize exception handling for subclasses that don't advance parse index - self.errmsg = "" - self.modalResults = True # used to mark results names as modal (report only last) or cumulative (list all) - self.debugActions = ( None, None, None ) #custom debug actions - self.re = None - self.callPreparse = True # used to avoid redundant calls to preParse - self.callDuringTry = False - - def copy( self ): - """ - Make a copy of this C{ParserElement}. Useful for defining different parse actions - for the same parsing pattern, using copies of the original parse element. 
- - Example:: - integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - integerK = integer.copy().addParseAction(lambda toks: toks[0]*1024) + Suppress("K") - integerM = integer.copy().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") - - print(OneOrMore(integerK | integerM | integer).parseString("5K 100 640K 256M")) - prints:: - [5120, 100, 655360, 268435456] - Equivalent form of C{expr.copy()} is just C{expr()}:: - integerM = integer().addParseAction(lambda toks: toks[0]*1024*1024) + Suppress("M") - """ - cpy = copy.copy( self ) - cpy.parseAction = self.parseAction[:] - cpy.ignoreExprs = self.ignoreExprs[:] - if self.copyDefaultWhiteChars: - cpy.whiteChars = ParserElement.DEFAULT_WHITE_CHARS - return cpy - - def setName( self, name ): - """ - Define name for this expression, makes debugging and exception messages clearer. - - Example:: - Word(nums).parseString("ABC") # -> Exception: Expected W:(0123...) (at char 0), (line:1, col:1) - Word(nums).setName("integer").parseString("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) - """ - self.name = name - self.errmsg = "Expected " + self.name - if hasattr(self,"exception"): - self.exception.msg = self.errmsg - return self - - def setResultsName( self, name, listAllMatches=False ): - """ - Define name for referencing matching tokens as a nested attribute - of the returned parse results. - NOTE: this returns a *copy* of the original C{ParserElement} object; - this is so that the client can define a basic element, such as an - integer, and reference it in multiple places with different names. - - You can also set results names using the abbreviated syntax, - C{expr("name")} in place of C{expr.setResultsName("name")} - - see L{I{__call__}<__call__>}. - - Example:: - date_str = (integer.setResultsName("year") + '/' - + integer.setResultsName("month") + '/' - + integer.setResultsName("day")) - - # equivalent form: - date_str = integer("year") + '/' + integer("month") + '/' + integer("day") - """ - newself = self.copy() - if name.endswith("*"): - name = name[:-1] - listAllMatches=True - newself.resultsName = name - newself.modalResults = not listAllMatches - return newself - - def setBreak(self,breakFlag = True): - """Method to invoke the Python pdb debugger when this element is - about to be parsed. Set C{breakFlag} to True to enable, False to - disable. - """ - if breakFlag: - _parseMethod = self._parse - def breaker(instring, loc, doActions=True, callPreParse=True): - import pdb - pdb.set_trace() - return _parseMethod( instring, loc, doActions, callPreParse ) - breaker._originalParseMethod = _parseMethod - self._parse = breaker - else: - if hasattr(self._parse,"_originalParseMethod"): - self._parse = self._parse._originalParseMethod - return self - - def setParseAction( self, *fns, **kwargs ): - """ - Define action to perform when successfully matching parse element definition. - Parse action fn is a callable method with 0-3 arguments, called as C{fn(s,loc,toks)}, - C{fn(loc,toks)}, C{fn(toks)}, or just C{fn()}, where: - - s = the original string being parsed (see note below) - - loc = the location of the matching substring - - toks = a list of the matched tokens, packaged as a C{L{ParseResults}} object - If the functions in fns modify the tokens, they can return them as the return - value from fn, and the modified list of tokens will replace the original. - Otherwise, fn does not need to return any value. 
- - Optional keyword arguments: - - callDuringTry = (default=C{False}) indicate if parse action should be run during lookaheads and alternate testing - - Note: the default parsing behavior is to expand tabs in the input string - before starting the parsing process. See L{I{parseString}<parseString>} for more information - on parsing strings containing C{<TAB>}s, and suggested methods to maintain a - consistent view of the parsed string, the parse location, and line and column - positions within the parsed string. - - Example:: - integer = Word(nums) - date_str = integer + '/' + integer + '/' + integer - - date_str.parseString("1999/12/31") # -> ['1999', '/', '12', '/', '31'] - - # use parse action to convert to ints at parse time - integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - date_str = integer + '/' + integer + '/' + integer - - # note that integer fields are now ints, not strings - date_str.parseString("1999/12/31") # -> [1999, '/', 12, '/', 31] - """ - self.parseAction = list(map(_trim_arity, list(fns))) - self.callDuringTry = kwargs.get("callDuringTry", False) - return self - - def addParseAction( self, *fns, **kwargs ): - """ - Add parse action to expression's list of parse actions. See L{I{setParseAction}<setParseAction>}. - - See examples in L{I{copy}<copy>}. - """ - self.parseAction += list(map(_trim_arity, list(fns))) - self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) - return self - - def addCondition(self, *fns, **kwargs): - """Add a boolean predicate function to expression's list of parse actions. See - L{I{setParseAction}<setParseAction>} for function call signatures. Unlike C{setParseAction}, - functions passed to C{addCondition} need to return boolean success/fail of the condition. - - Optional keyword arguments: - - message = define a custom message to be used in the raised exception - - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise ParseException - - Example:: - integer = Word(nums).setParseAction(lambda toks: int(toks[0])) - year_int = integer.copy() - year_int.addCondition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") - date_str = year_int + '/' + integer + '/' + integer - - result = date_str.parseString("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), (line:1, col:1) - """ - msg = kwargs.get("message", "failed user-defined condition") - exc_type = ParseFatalException if kwargs.get("fatal", False) else ParseException - for fn in fns: - def pa(s,l,t): - if not bool(_trim_arity(fn)(s,l,t)): - raise exc_type(s,l,msg) - self.parseAction.append(pa) - self.callDuringTry = self.callDuringTry or kwargs.get("callDuringTry", False) - return self - - def setFailAction( self, fn ): - """Define action to perform if parsing fails at this expression. - Fail acton fn is a callable function that takes the arguments - C{fn(s,loc,expr,err)} where: - - s = string being parsed - - loc = location where expression match was attempted and failed - - expr = the parse expression that failed - - err = the exception thrown - The function returns no value. 
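# A hedged sketch of setFailAction (documented here); report_failure is an
# illustrative callback name, not part of the library.
from pyparsing import Word, nums, ParseException

def report_failure(s, loc, expr, err):
    print("failed at char %d while matching %s" % (loc, expr))

integer = Word(nums).setFailAction(report_failure)
try:
    integer.parseString("abc")
except ParseException:
    pass   # report_failure has already printed: failed at char 0 while matching W:(0123...)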
It may throw C{L{ParseFatalException}} - if it is desired to stop parsing immediately.""" - self.failAction = fn - return self - - def _skipIgnorables( self, instring, loc ): - exprsFound = True - while exprsFound: - exprsFound = False - for e in self.ignoreExprs: - try: - while 1: - loc,dummy = e._parse( instring, loc ) - exprsFound = True - except ParseException: - pass - return loc - - def preParse( self, instring, loc ): - if self.ignoreExprs: - loc = self._skipIgnorables( instring, loc ) - - if self.skipWhitespace: - wt = self.whiteChars - instrlen = len(instring) - while loc < instrlen and instring[loc] in wt: - loc += 1 - - return loc - - def parseImpl( self, instring, loc, doActions=True ): - return loc, [] - - def postParse( self, instring, loc, tokenlist ): - return tokenlist - - #~ @profile - def _parseNoCache( self, instring, loc, doActions=True, callPreParse=True ): - debugging = ( self.debug ) #and doActions ) - - if debugging or self.failAction: - #~ print ("Match",self,"at loc",loc,"(%d,%d)" % ( lineno(loc,instring), col(loc,instring) )) - if (self.debugActions[0] ): - self.debugActions[0]( instring, loc, self ) - if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) - else: - preloc = loc - tokensStart = preloc - try: - try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) - except ParseBaseException as err: - #~ print ("Exception raised:", err) - if self.debugActions[2]: - self.debugActions[2]( instring, tokensStart, self, err ) - if self.failAction: - self.failAction( instring, tokensStart, self, err ) - raise - else: - if callPreParse and self.callPreparse: - preloc = self.preParse( instring, loc ) - else: - preloc = loc - tokensStart = preloc - if self.mayIndexError or loc >= len(instring): - try: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - except IndexError: - raise ParseException( instring, len(instring), self.errmsg, self ) - else: - loc,tokens = self.parseImpl( instring, preloc, doActions ) - - tokens = self.postParse( instring, loc, tokens ) - - retTokens = ParseResults( tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults ) - if self.parseAction and (doActions or self.callDuringTry): - if debugging: - try: - for fn in self.parseAction: - tokens = fn( instring, tokensStart, retTokens ) - if tokens is not None: - retTokens = ParseResults( tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) - except ParseBaseException as err: - #~ print "Exception raised in user parse action:", err - if (self.debugActions[2] ): - self.debugActions[2]( instring, tokensStart, self, err ) - raise - else: - for fn in self.parseAction: - tokens = fn( instring, tokensStart, retTokens ) - if tokens is not None: - retTokens = ParseResults( tokens, - self.resultsName, - asList=self.saveAsList and isinstance(tokens,(ParseResults,list)), - modal=self.modalResults ) - - if debugging: - #~ print ("Matched",self,"->",retTokens.asList()) - if (self.debugActions[1] ): - self.debugActions[1]( instring, tokensStart, loc, self, retTokens ) - - return loc, retTokens - - def tryParse( self, instring, loc ): - try: - return self._parse( instring, loc, doActions=False )[0] - except ParseFatalException: - raise ParseException( instring, loc, self.errmsg, self) - - def canParseNext(self, instring, loc): - try: - self.tryParse(instring, loc) - except (ParseException, 
IndexError): - return False - else: - return True - - class _UnboundedCache(object): - def __init__(self): - cache = {} - self.not_in_cache = not_in_cache = object() - - def get(self, key): - return cache.get(key, not_in_cache) - - def set(self, key, value): - cache[key] = value - - def clear(self): - cache.clear() - - self.get = types.MethodType(get, self) - self.set = types.MethodType(set, self) - self.clear = types.MethodType(clear, self) - - if _OrderedDict is not None: - class _FifoCache(object): - def __init__(self, size): - self.not_in_cache = not_in_cache = object() - - cache = _OrderedDict() - - def get(self, key): - return cache.get(key, not_in_cache) - - def set(self, key, value): - cache[key] = value - if len(cache) > size: - cache.popitem(False) - - def clear(self): - cache.clear() - - self.get = types.MethodType(get, self) - self.set = types.MethodType(set, self) - self.clear = types.MethodType(clear, self) - - else: - class _FifoCache(object): - def __init__(self, size): - self.not_in_cache = not_in_cache = object() - - cache = {} - key_fifo = collections.deque([], size) - - def get(self, key): - return cache.get(key, not_in_cache) - - def set(self, key, value): - cache[key] = value - if len(cache) > size: - cache.pop(key_fifo.popleft(), None) - key_fifo.append(key) - - def clear(self): - cache.clear() - key_fifo.clear() - - self.get = types.MethodType(get, self) - self.set = types.MethodType(set, self) - self.clear = types.MethodType(clear, self) - - # argument cache for optimizing repeated calls when backtracking through recursive expressions - packrat_cache = {} # this is set later by enabledPackrat(); this is here so that resetCache() doesn't fail - packrat_cache_lock = RLock() - packrat_cache_stats = [0, 0] - - # this method gets repeatedly called during backtracking with the same arguments - - # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression - def _parseCache( self, instring, loc, doActions=True, callPreParse=True ): - HIT, MISS = 0, 1 - lookup = (self, instring, loc, callPreParse, doActions) - with ParserElement.packrat_cache_lock: - cache = ParserElement.packrat_cache - value = cache.get(lookup) - if value is cache.not_in_cache: - ParserElement.packrat_cache_stats[MISS] += 1 - try: - value = self._parseNoCache(instring, loc, doActions, callPreParse) - except ParseBaseException as pe: - # cache a copy of the exception, without the traceback - cache.set(lookup, pe.__class__(*pe.args)) - raise - else: - cache.set(lookup, (value[0], value[1].copy())) - return value - else: - ParserElement.packrat_cache_stats[HIT] += 1 - if isinstance(value, Exception): - raise value - return (value[0], value[1].copy()) - - _parse = _parseNoCache - - @staticmethod - def resetCache(): - ParserElement.packrat_cache.clear() - ParserElement.packrat_cache_stats[:] = [0] * len(ParserElement.packrat_cache_stats) - - _packratEnabled = False - @staticmethod - def enablePackrat(cache_size_limit=128): - """Enables "packrat" parsing, which adds memoizing to the parsing logic. - Repeated parse attempts at the same string location (which happens - often in many complex grammars) can immediately return a cached value, - instead of re-executing parsing/validating code. Memoizing is done of - both valid results and parsing exceptions. 
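# A minimal usage sketch for packrat parsing: the enablePackrat() call is the
# documented API; the grammar below is only illustrative.
import pyparsing
pyparsing.ParserElement.enablePackrat()            # enable memoization once, up front

from pyparsing import OneOrMore, Word, alphas
words = OneOrMore(Word(alphas))
print(words.parseString("packrat caching is transparent"))   # same results, cached retries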
- - Parameters: - - cache_size_limit - (default=C{128}) - if an integer value is provided - will limit the size of the packrat cache; if None is passed, then - the cache size will be unbounded; if 0 is passed, the cache will - be effectively disabled. - - This speedup may break existing programs that use parse actions that - have side-effects. For this reason, packrat parsing is disabled when - you first import pyparsing. To activate the packrat feature, your - program must call the class method C{ParserElement.enablePackrat()}. If - your program uses C{psyco} to "compile as you go", you must call - C{enablePackrat} before calling C{psyco.full()}. If you do not do this, - Python will crash. For best results, call C{enablePackrat()} immediately - after importing pyparsing. - - Example:: - import pyparsing - pyparsing.ParserElement.enablePackrat() - """ - if not ParserElement._packratEnabled: - ParserElement._packratEnabled = True - if cache_size_limit is None: - ParserElement.packrat_cache = ParserElement._UnboundedCache() - else: - ParserElement.packrat_cache = ParserElement._FifoCache(cache_size_limit) - ParserElement._parse = ParserElement._parseCache - - def parseString( self, instring, parseAll=False ): - """ - Execute the parse expression with the given string. - This is the main interface to the client code, once the complete - expression has been built. - - If you want the grammar to require that the entire input string be - successfully parsed, then set C{parseAll} to True (equivalent to ending - the grammar with C{L{StringEnd()}}). - - Note: C{parseString} implicitly calls C{expandtabs()} on the input string, - in order to report proper column numbers in parse actions. - If the input string contains tabs and - the grammar uses parse actions that use the C{loc} argument to index into the - string being parsed, you can ensure you have a consistent view of the input - string by: - - calling C{parseWithTabs} on your grammar before calling C{parseString} - (see L{I{parseWithTabs}<parseWithTabs>}) - - define your parse action using the full C{(s,loc,toks)} signature, and - reference the input string using the parse action's C{s} argument - - explictly expand the tabs in your input string before calling - C{parseString} - - Example:: - Word('a').parseString('aaaaabaaa') # -> ['aaaaa'] - Word('a').parseString('aaaaabaaa', parseAll=True) # -> Exception: Expected end of text - """ - ParserElement.resetCache() - if not self.streamlined: - self.streamline() - #~ self.saveAsList = True - for e in self.ignoreExprs: - e.streamline() - if not self.keepTabs: - instring = instring.expandtabs() - try: - loc, tokens = self._parse( instring, 0 ) - if parseAll: - loc = self.preParse( instring, loc ) - se = Empty() + StringEnd() - se._parse( instring, loc ) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc - else: - return tokens - - def scanString( self, instring, maxMatches=_MAX_INT, overlap=False ): - """ - Scan the input string for expression matches. Each match will return the - matching tokens, start location, and end location. May be called with optional - C{maxMatches} argument, to clip scanning after 'n' matches are found. If - C{overlap} is specified, then overlapping matches will be reported. - - Note that the start and end locations are reported relative to the string - being parsed. 
See L{I{parseString}<parseString>} for more information on parsing - strings with embedded tabs. - - Example:: - source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" - print(source) - for tokens,start,end in Word(alphas).scanString(source): - print(' '*start + '^'*(end-start)) - print(' '*start + tokens[0]) - - prints:: - - sldjf123lsdjjkf345sldkjf879lkjsfd987 - ^^^^^ - sldjf - ^^^^^^^ - lsdjjkf - ^^^^^^ - sldkjf - ^^^^^^ - lkjsfd - """ - if not self.streamlined: - self.streamline() - for e in self.ignoreExprs: - e.streamline() - - if not self.keepTabs: - instring = _ustr(instring).expandtabs() - instrlen = len(instring) - loc = 0 - preparseFn = self.preParse - parseFn = self._parse - ParserElement.resetCache() - matches = 0 - try: - while loc <= instrlen and matches < maxMatches: - try: - preloc = preparseFn( instring, loc ) - nextLoc,tokens = parseFn( instring, preloc, callPreParse=False ) - except ParseException: - loc = preloc+1 - else: - if nextLoc > loc: - matches += 1 - yield tokens, preloc, nextLoc - if overlap: - nextloc = preparseFn( instring, loc ) - if nextloc > loc: - loc = nextLoc - else: - loc += 1 - else: - loc = nextLoc - else: - loc = preloc+1 - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc - - def transformString( self, instring ): - """ - Extension to C{L{scanString}}, to modify matching text with modified tokens that may - be returned from a parse action. To use C{transformString}, define a grammar and - attach a parse action to it that modifies the returned token list. - Invoking C{transformString()} on a target string will then scan for matches, - and replace the matched text patterns according to the logic in the parse - action. C{transformString()} returns the resulting transformed string. - - Example:: - wd = Word(alphas) - wd.setParseAction(lambda toks: toks[0].title()) - - print(wd.transformString("now is the winter of our discontent made glorious summer by this sun of york.")) - Prints:: - Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. - """ - out = [] - lastE = 0 - # force preservation of <TAB>s, to minimize unwanted transformation of string, and to - # keep string locs straight between transformString and scanString - self.keepTabs = True - try: - for t,s,e in self.scanString( instring ): - out.append( instring[lastE:s] ) - if t: - if isinstance(t,ParseResults): - out += t.asList() - elif isinstance(t,list): - out += t - else: - out.append(t) - lastE = e - out.append(instring[lastE:]) - out = [o for o in out if o] - return "".join(map(_ustr,_flatten(out))) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc - - def searchString( self, instring, maxMatches=_MAX_INT ): - """ - Another extension to C{L{scanString}}, simplifying the access to the tokens found - to match the given parse expression. May be called with optional - C{maxMatches} argument, to clip searching after 'n' matches are found. 
- - Example:: - # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters - cap_word = Word(alphas.upper(), alphas.lower()) - - print(cap_word.searchString("More than Iron, more than Lead, more than Gold I need Electricity")) - prints:: - ['More', 'Iron', 'Lead', 'Gold', 'I'] - """ - try: - return ParseResults([ t for t,s,e in self.scanString( instring, maxMatches ) ]) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc - - def split(self, instring, maxsplit=_MAX_INT, includeSeparators=False): - """ - Generator method to split a string using the given expression as a separator. - May be called with optional C{maxsplit} argument, to limit the number of splits; - and the optional C{includeSeparators} argument (default=C{False}), if the separating - matching text should be included in the split results. - - Example:: - punc = oneOf(list(".,;:/-!?")) - print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) - prints:: - ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] - """ - splits = 0 - last = 0 - for t,s,e in self.scanString(instring, maxMatches=maxsplit): - yield instring[last:s] - if includeSeparators: - yield t[0] - last = e - yield instring[last:] - - def __add__(self, other ): - """ - Implementation of + operator - returns C{L{And}}. Adding strings to a ParserElement - converts them to L{Literal}s by default. - - Example:: - greet = Word(alphas) + "," + Word(alphas) + "!" - hello = "Hello, World!" - print (hello, "->", greet.parseString(hello)) - Prints:: - Hello, World! -> ['Hello', ',', 'World', '!'] - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return And( [ self, other ] ) - - def __radd__(self, other ): - """ - Implementation of + operator when left operand is not a C{L{ParserElement}} - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other + self - - def __sub__(self, other): - """ - Implementation of - operator, returns C{L{And}} with error stop - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return And( [ self, And._ErrorStop(), other ] ) - - def __rsub__(self, other ): - """ - Implementation of - operator when left operand is not a C{L{ParserElement}} - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other - self - - def __mul__(self,other): - """ - Implementation of * operator, allows use of C{expr * 3} in place of - C{expr + expr + expr}. Expressions may also me multiplied by a 2-integer - tuple, similar to C{{min,max}} multipliers in regular expressions. 
Tuples - may also include C{None} as in: - - C{expr*(n,None)} or C{expr*(n,)} is equivalent - to C{expr*n + L{ZeroOrMore}(expr)} - (read as "at least n instances of C{expr}") - - C{expr*(None,n)} is equivalent to C{expr*(0,n)} - (read as "0 to n instances of C{expr}") - - C{expr*(None,None)} is equivalent to C{L{ZeroOrMore}(expr)} - - C{expr*(1,None)} is equivalent to C{L{OneOrMore}(expr)} - - Note that C{expr*(None,n)} does not raise an exception if - more than n exprs exist in the input stream; that is, - C{expr*(None,n)} does not enforce a maximum number of expr - occurrences. If this behavior is desired, then write - C{expr*(None,n) + ~expr} - """ - if isinstance(other,int): - minElements, optElements = other,0 - elif isinstance(other,tuple): - other = (other + (None, None))[:2] - if other[0] is None: - other = (0, other[1]) - if isinstance(other[0],int) and other[1] is None: - if other[0] == 0: - return ZeroOrMore(self) - if other[0] == 1: - return OneOrMore(self) - else: - return self*other[0] + ZeroOrMore(self) - elif isinstance(other[0],int) and isinstance(other[1],int): - minElements, optElements = other - optElements -= minElements - else: - raise TypeError("cannot multiply 'ParserElement' and ('%s','%s') objects", type(other[0]),type(other[1])) - else: - raise TypeError("cannot multiply 'ParserElement' and '%s' objects", type(other)) - - if minElements < 0: - raise ValueError("cannot multiply ParserElement by negative value") - if optElements < 0: - raise ValueError("second tuple value must be greater or equal to first tuple value") - if minElements == optElements == 0: - raise ValueError("cannot multiply ParserElement by 0 or (0,0)") - - if (optElements): - def makeOptionalList(n): - if n>1: - return Optional(self + makeOptionalList(n-1)) - else: - return Optional(self) - if minElements: - if minElements == 1: - ret = self + makeOptionalList(optElements) - else: - ret = And([self]*minElements) + makeOptionalList(optElements) - else: - ret = makeOptionalList(optElements) - else: - if minElements == 1: - ret = self - else: - ret = And([self]*minElements) - return ret - - def __rmul__(self, other): - return self.__mul__(other) - - def __or__(self, other ): - """ - Implementation of | operator - returns C{L{MatchFirst}} - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return MatchFirst( [ self, other ] ) - - def __ror__(self, other ): - """ - Implementation of | operator when left operand is not a C{L{ParserElement}} - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other | self - - def __xor__(self, other ): - """ - Implementation of ^ operator - returns C{L{Or}} - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Or( [ self, other ] ) - - def __rxor__(self, other ): - """ - Implementation of ^ operator when left operand is not a C{L{ParserElement}} - """ - if isinstance( other, basestring ): - other = 
ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other ^ self - - def __and__(self, other ): - """ - Implementation of & operator - returns C{L{Each}} - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return Each( [ self, other ] ) - - def __rand__(self, other ): - """ - Implementation of & operator when left operand is not a C{L{ParserElement}} - """ - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - if not isinstance( other, ParserElement ): - warnings.warn("Cannot combine element of type %s with ParserElement" % type(other), - SyntaxWarning, stacklevel=2) - return None - return other & self - - def __invert__( self ): - """ - Implementation of ~ operator - returns C{L{NotAny}} - """ - return NotAny( self ) - - def __call__(self, name=None): - """ - Shortcut for C{L{setResultsName}}, with C{listAllMatches=False}. - - If C{name} is given with a trailing C{'*'} character, then C{listAllMatches} will be - passed as C{True}. - - If C{name} is omitted, same as calling C{L{copy}}. - - Example:: - # these are equivalent - userdata = Word(alphas).setResultsName("name") + Word(nums+"-").setResultsName("socsecno") - userdata = Word(alphas)("name") + Word(nums+"-")("socsecno") - """ - if name is not None: - return self.setResultsName(name) - else: - return self.copy() - - def suppress( self ): - """ - Suppresses the output of this C{ParserElement}; useful to keep punctuation from - cluttering up returned output. - """ - return Suppress( self ) - - def leaveWhitespace( self ): - """ - Disables the skipping of whitespace before matching the characters in the - C{ParserElement}'s defined pattern. This is normally only used internally by - the pyparsing module, but may be needed in some whitespace-sensitive grammars. - """ - self.skipWhitespace = False - return self - - def setWhitespaceChars( self, chars ): - """ - Overrides the default whitespace chars - """ - self.skipWhitespace = True - self.whiteChars = chars - self.copyDefaultWhiteChars = False - return self - - def parseWithTabs( self ): - """ - Overrides default behavior to expand C{<TAB>}s to spaces before parsing the input string. - Must be called before C{parseString} when the input grammar contains elements that - match C{<TAB>} characters. - """ - self.keepTabs = True - return self - - def ignore( self, other ): - """ - Define expression to be ignored (e.g., comments) while doing pattern - matching; may be called repeatedly, to define multiple comment or other - ignorable patterns. - - Example:: - patt = OneOrMore(Word(alphas)) - patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj'] - - patt.ignore(cStyleComment) - patt.parseString('ablaj /* comment */ lskjd') # -> ['ablaj', 'lskjd'] - """ - if isinstance(other, basestring): - other = Suppress(other) - - if isinstance( other, Suppress ): - if other not in self.ignoreExprs: - self.ignoreExprs.append(other) - else: - self.ignoreExprs.append( Suppress( other.copy() ) ) - return self - - def setDebugActions( self, startAction, successAction, exceptionAction ): - """ - Enable display of debugging messages while doing pattern matching. 
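# A hedged sketch for setDebugActions (documented here), using custom callbacks
# with the same signatures as the default debug actions; the callback names
# on_try, on_match, and on_fail are illustrative only.
from pyparsing import Word, alphas

def on_try(instring, loc, expr):
    print("trying %s at loc %d" % (expr, loc))

def on_match(instring, startloc, endloc, expr, toks):
    print("matched %s -> %s" % (expr, toks.asList()))

def on_fail(instring, loc, expr, exc):
    print("failed %s: %s" % (expr, exc))

word = Word(alphas).setName("word").setDebugActions(on_try, on_match, on_fail)
word.parseString("hello")   # -> trying word at loc 0 / matched word -> ['hello']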
- """ - self.debugActions = (startAction or _defaultStartDebugAction, - successAction or _defaultSuccessDebugAction, - exceptionAction or _defaultExceptionDebugAction) - self.debug = True - return self - - def setDebug( self, flag=True ): - """ - Enable display of debugging messages while doing pattern matching. - Set C{flag} to True to enable, False to disable. - - Example:: - wd = Word(alphas).setName("alphaword") - integer = Word(nums).setName("numword") - term = wd | integer - - # turn on debugging for wd - wd.setDebug() - - OneOrMore(term).parseString("abc 123 xyz 890") - - prints:: - Match alphaword at loc 0(1,1) - Matched alphaword -> ['abc'] - Match alphaword at loc 3(1,4) - Exception raised:Expected alphaword (at char 4), (line:1, col:5) - Match alphaword at loc 7(1,8) - Matched alphaword -> ['xyz'] - Match alphaword at loc 11(1,12) - Exception raised:Expected alphaword (at char 12), (line:1, col:13) - Match alphaword at loc 15(1,16) - Exception raised:Expected alphaword (at char 15), (line:1, col:16) - - The output shown is that produced by the default debug actions - custom debug actions can be - specified using L{setDebugActions}. Prior to attempting - to match the C{wd} expression, the debugging message C{"Match <exprname> at loc <n>(<line>,<col>)"} - is shown. Then if the parse succeeds, a C{"Matched"} message is shown, or an C{"Exception raised"} - message is shown. Also note the use of L{setName} to assign a human-readable name to the expression, - which makes debugging and exception messages easier to understand - for instance, the default - name created for the C{Word} expression without calling C{setName} is C{"W:(ABCD...)"}. - """ - if flag: - self.setDebugActions( _defaultStartDebugAction, _defaultSuccessDebugAction, _defaultExceptionDebugAction ) - else: - self.debug = False - return self - - def __str__( self ): - return self.name - - def __repr__( self ): - return _ustr(self) - - def streamline( self ): - self.streamlined = True - self.strRepr = None - return self - - def checkRecursion( self, parseElementList ): - pass - - def validate( self, validateTrace=[] ): - """ - Check defined expressions for valid structure, check for infinite recursive definitions. - """ - self.checkRecursion( [] ) - - def parseFile( self, file_or_filename, parseAll=False ): - """ - Execute the parse expression on the given file or filename. - If a filename is specified (instead of a file object), - the entire file is opened, read, and closed before parsing. - """ - try: - file_contents = file_or_filename.read() - except AttributeError: - with open(file_or_filename, "r") as f: - file_contents = f.read() - try: - return self.parseString(file_contents, parseAll) - except ParseBaseException as exc: - if ParserElement.verbose_stacktrace: - raise - else: - # catch and re-raise exception from here, clears out pyparsing internal stack trace - raise exc - - def __eq__(self,other): - if isinstance(other, ParserElement): - return self is other or vars(self) == vars(other) - elif isinstance(other, basestring): - return self.matches(other) - else: - return super(ParserElement,self)==other - - def __ne__(self,other): - return not (self == other) - - def __hash__(self): - return hash(id(self)) - - def __req__(self,other): - return self == other - - def __rne__(self,other): - return not (self == other) - - def matches(self, testString, parseAll=True): - """ - Method for quick testing of a parser against a test string. Good for simple - inline microtests of sub expressions while building up larger parser. 
- - Parameters: - - testString - to test against this expression for a match - - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests - - Example:: - expr = Word(nums) - assert expr.matches("100") - """ - try: - self.parseString(_ustr(testString), parseAll=parseAll) - return True - except ParseBaseException: - return False - - def runTests(self, tests, parseAll=True, comment='#', fullDump=True, printResults=True, failureTests=False): - """ - Execute the parse expression on a series of test strings, showing each - test, the parsed results or where the parse failed. Quick and easy way to - run a parse expression against a list of sample strings. - - Parameters: - - tests - a list of separate test strings, or a multiline string of test strings - - parseAll - (default=C{True}) - flag to pass to C{L{parseString}} when running tests - - comment - (default=C{'#'}) - expression for indicating embedded comments in the test - string; pass None to disable comment filtering - - fullDump - (default=C{True}) - dump results as list followed by results names in nested outline; - if False, only dump nested list - - printResults - (default=C{True}) prints test output to stdout - - failureTests - (default=C{False}) indicates if these tests are expected to fail parsing - - Returns: a (success, results) tuple, where success indicates that all tests succeeded - (or failed if C{failureTests} is True), and the results contain a list of lines of each - test's output - - Example:: - number_expr = pyparsing_common.number.copy() - - result = number_expr.runTests(''' - # unsigned integer - 100 - # negative integer - -100 - # float with scientific notation - 6.02e23 - # integer with scientific notation - 1e-12 - ''') - print("Success" if result[0] else "Failed!") - - result = number_expr.runTests(''' - # stray character - 100Z - # missing leading digit before '.' - -.100 - # too many '.' - 3.14.159 - ''', failureTests=True) - print("Success" if result[0] else "Failed!") - prints:: - # unsigned integer - 100 - [100] - - # negative integer - -100 - [-100] - - # float with scientific notation - 6.02e23 - [6.02e+23] - - # integer with scientific notation - 1e-12 - [1e-12] - - Success - - # stray character - 100Z - ^ - FAIL: Expected end of text (at char 3), (line:1, col:4) - - # missing leading digit before '.' - -.100 - ^ - FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) - - # too many '.' - 3.14.159 - ^ - FAIL: Expected end of text (at char 4), (line:1, col:5) - - Success - - Each test string must be on a single line. If you want to test a string that spans multiple - lines, create a test like this:: - - expr.runTest(r"this is a test\\n of strings that spans \\n 3 lines") - - (Note that this is a raw string literal, you must include the leading 'r'.) 
- """ - if isinstance(tests, basestring): - tests = list(map(str.strip, tests.rstrip().splitlines())) - if isinstance(comment, basestring): - comment = Literal(comment) - allResults = [] - comments = [] - success = True - for t in tests: - if comment is not None and comment.matches(t, False) or comments and not t: - comments.append(t) - continue - if not t: - continue - out = ['\n'.join(comments), t] - comments = [] - try: - t = t.replace(r'\n','\n') - result = self.parseString(t, parseAll=parseAll) - out.append(result.dump(full=fullDump)) - success = success and not failureTests - except ParseBaseException as pe: - fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" - if '\n' in t: - out.append(line(pe.loc, t)) - out.append(' '*(col(pe.loc,t)-1) + '^' + fatal) - else: - out.append(' '*pe.loc + '^' + fatal) - out.append("FAIL: " + str(pe)) - success = success and failureTests - result = pe - except Exception as exc: - out.append("FAIL-EXCEPTION: " + str(exc)) - success = success and failureTests - result = exc - - if printResults: - if fullDump: - out.append('') - print('\n'.join(out)) - - allResults.append((t, result)) - - return success, allResults - - -class Token(ParserElement): - """ - Abstract C{ParserElement} subclass, for defining atomic matching patterns. - """ - def __init__( self ): - super(Token,self).__init__( savelist=False ) - - -class Empty(Token): - """ - An empty token, will always match. - """ - def __init__( self ): - super(Empty,self).__init__() - self.name = "Empty" - self.mayReturnEmpty = True - self.mayIndexError = False - - -class NoMatch(Token): - """ - A token that will never match. - """ - def __init__( self ): - super(NoMatch,self).__init__() - self.name = "NoMatch" - self.mayReturnEmpty = True - self.mayIndexError = False - self.errmsg = "Unmatchable token" - - def parseImpl( self, instring, loc, doActions=True ): - raise ParseException(instring, loc, self.errmsg, self) - - -class Literal(Token): - """ - Token to exactly match a specified string. - - Example:: - Literal('blah').parseString('blah') # -> ['blah'] - Literal('blah').parseString('blahfooblah') # -> ['blah'] - Literal('blah').parseString('bla') # -> Exception: Expected "blah" - - For case-insensitive matching, use L{CaselessLiteral}. - - For keyword matching (force word break before and after the matched string), - use L{Keyword} or L{CaselessKeyword}. 
- """ - def __init__( self, matchString ): - super(Literal,self).__init__() - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Literal; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.__class__ = Empty - self.name = '"%s"' % _ustr(self.match) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - - # Performance tuning: this routine gets called a *lot* - # if this is a single character match string and the first character matches, - # short-circuit as quickly as possible, and avoid calling startswith - #~ @profile - def parseImpl( self, instring, loc, doActions=True ): - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) ): - return loc+self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) -_L = Literal -ParserElement._literalStringClass = Literal - -class Keyword(Token): - """ - Token to exactly match a specified string as a keyword, that is, it must be - immediately followed by a non-keyword character. Compare with C{L{Literal}}: - - C{Literal("if")} will match the leading C{'if'} in C{'ifAndOnlyIf'}. - - C{Keyword("if")} will not; it will only match the leading C{'if'} in C{'if x=1'}, or C{'if(y==2)'} - Accepts two optional constructor arguments in addition to the keyword string: - - C{identChars} is a string of characters that would be valid identifier characters, - defaulting to all alphanumerics + "_" and "$" - - C{caseless} allows case-insensitive matching, default is C{False}. - - Example:: - Keyword("start").parseString("start") # -> ['start'] - Keyword("start").parseString("starting") # -> Exception - - For case-insensitive matching, use L{CaselessKeyword}. 
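# A short contrast sketch for the Literal/Keyword distinction described above:
from pyparsing import Literal, Keyword, ParseException

print(Literal("if").parseString("ifAndOnlyIf"))   # -> ['if']  (prefix match succeeds)
try:
    Keyword("if").parseString("ifAndOnlyIf")      # fails: no word break after 'if'
except ParseException as pe:
    print("Keyword did not match:", pe)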
- """ - DEFAULT_KEYWORD_CHARS = alphanums+"_$" - - def __init__( self, matchString, identChars=None, caseless=False ): - super(Keyword,self).__init__() - if identChars is None: - identChars = Keyword.DEFAULT_KEYWORD_CHARS - self.match = matchString - self.matchLen = len(matchString) - try: - self.firstMatchChar = matchString[0] - except IndexError: - warnings.warn("null string passed to Keyword; use Empty() instead", - SyntaxWarning, stacklevel=2) - self.name = '"%s"' % self.match - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = False - self.mayIndexError = False - self.caseless = caseless - if caseless: - self.caselessmatch = matchString.upper() - identChars = identChars.upper() - self.identChars = set(identChars) - - def parseImpl( self, instring, loc, doActions=True ): - if self.caseless: - if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) and - (loc == 0 or instring[loc-1].upper() not in self.identChars) ): - return loc+self.matchLen, self.match - else: - if (instring[loc] == self.firstMatchChar and - (self.matchLen==1 or instring.startswith(self.match,loc)) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen] not in self.identChars) and - (loc == 0 or instring[loc-1] not in self.identChars) ): - return loc+self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) - - def copy(self): - c = super(Keyword,self).copy() - c.identChars = Keyword.DEFAULT_KEYWORD_CHARS - return c - - @staticmethod - def setDefaultKeywordChars( chars ): - """Overrides the default Keyword chars - """ - Keyword.DEFAULT_KEYWORD_CHARS = chars - -class CaselessLiteral(Literal): - """ - Token to match a specified string, ignoring case of letters. - Note: the matched results will always be in the case of the given - match string, NOT the case of the input text. - - Example:: - OneOrMore(CaselessLiteral("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD', 'CMD'] - - (Contrast with example for L{CaselessKeyword}.) - """ - def __init__( self, matchString ): - super(CaselessLiteral,self).__init__( matchString.upper() ) - # Preserve the defining literal. - self.returnString = matchString - self.name = "'%s'" % self.returnString - self.errmsg = "Expected " + self.name - - def parseImpl( self, instring, loc, doActions=True ): - if instring[ loc:loc+self.matchLen ].upper() == self.match: - return loc+self.matchLen, self.returnString - raise ParseException(instring, loc, self.errmsg, self) - -class CaselessKeyword(Keyword): - """ - Caseless version of L{Keyword}. - - Example:: - OneOrMore(CaselessKeyword("CMD")).parseString("cmd CMD Cmd10") # -> ['CMD', 'CMD'] - - (Contrast with example for L{CaselessLiteral}.) - """ - def __init__( self, matchString, identChars=None ): - super(CaselessKeyword,self).__init__( matchString, identChars, caseless=True ) - - def parseImpl( self, instring, loc, doActions=True ): - if ( (instring[ loc:loc+self.matchLen ].upper() == self.caselessmatch) and - (loc >= len(instring)-self.matchLen or instring[loc+self.matchLen].upper() not in self.identChars) ): - return loc+self.matchLen, self.match - raise ParseException(instring, loc, self.errmsg, self) - -class CloseMatch(Token): - """ - A variation on L{Literal} which matches "close" matches, that is, - strings with at most 'n' mismatching characters. 
C{CloseMatch} takes parameters: - - C{match_string} - string to be matched - - C{maxMismatches} - (C{default=1}) maximum number of mismatches allowed to count as a match - - The results from a successful parse will contain the matched text from the input string and the following named results: - - C{mismatches} - a list of the positions within the match_string where mismatches were found - - C{original} - the original match_string used to compare against the input string - - If C{mismatches} is an empty list, then the match was an exact match. - - Example:: - patt = CloseMatch("ATCATCGAATGGA") - patt.parseString("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) - patt.parseString("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) - - # exact match - patt.parseString("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) - - # close match allowing up to 2 mismatches - patt = CloseMatch("ATCATCGAATGGA", maxMismatches=2) - patt.parseString("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) - """ - def __init__(self, match_string, maxMismatches=1): - super(CloseMatch,self).__init__() - self.name = match_string - self.match_string = match_string - self.maxMismatches = maxMismatches - self.errmsg = "Expected %r (with up to %d mismatches)" % (self.match_string, self.maxMismatches) - self.mayIndexError = False - self.mayReturnEmpty = False - - def parseImpl( self, instring, loc, doActions=True ): - start = loc - instrlen = len(instring) - maxloc = start + len(self.match_string) - - if maxloc <= instrlen: - match_string = self.match_string - match_stringloc = 0 - mismatches = [] - maxMismatches = self.maxMismatches - - for match_stringloc,s_m in enumerate(zip(instring[loc:maxloc], self.match_string)): - src,mat = s_m - if src != mat: - mismatches.append(match_stringloc) - if len(mismatches) > maxMismatches: - break - else: - loc = match_stringloc + 1 - results = ParseResults([instring[start:loc]]) - results['original'] = self.match_string - results['mismatches'] = mismatches - return loc, results - - raise ParseException(instring, loc, self.errmsg, self) - - -class Word(Token): - """ - Token for matching words composed of allowed character sets. - Defined with string containing all allowed initial characters, - an optional string containing allowed body characters (if omitted, - defaults to the initial character set), and an optional minimum, - maximum, and/or exact length. The default value for C{min} is 1 (a - minimum value < 1 is not valid); the default values for C{max} and C{exact} - are 0, meaning no maximum or exact length restriction. An optional - C{excludeChars} parameter can list characters that might be found in - the input C{bodyChars} string; useful to define a word of all printables - except for one or two characters, for instance. - - L{srange} is useful for defining custom character set strings for defining - C{Word} expressions, using range notation from regular expression character sets. - - A common mistake is to use C{Word} to match a specific literal string, as in - C{Word("Address")}. Remember that C{Word} uses the string argument to define - I{sets} of matchable characters. This expression would match "Add", "AAA", - "dAred", or any other word made up of the characters 'A', 'd', 'r', 'e', and 's'. - To match an exact literal string, use L{Literal} or L{Keyword}. 
- - pyparsing includes helper strings for building Words: - - L{alphas} - - L{nums} - - L{alphanums} - - L{hexnums} - - L{alphas8bit} (alphabetic characters in ASCII range 128-255 - accented, tilded, umlauted, etc.) - - L{punc8bit} (non-alphabetic characters in ASCII range 128-255 - currency, symbols, superscripts, diacriticals, etc.) - - L{printables} (any non-whitespace character) - - Example:: - # a word composed of digits - integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) - - # a word with a leading capital, and zero or more lowercase - capital_word = Word(alphas.upper(), alphas.lower()) - - # hostnames are alphanumeric, with leading alpha, and '-' - hostname = Word(alphas, alphanums+'-') - - # roman numeral (not a strict parser, accepts invalid mix of characters) - roman = Word("IVXLCDM") - - # any string of non-whitespace characters, except for ',' - csv_value = Word(printables, excludeChars=",") - """ - def __init__( self, initChars, bodyChars=None, min=1, max=0, exact=0, asKeyword=False, excludeChars=None ): - super(Word,self).__init__() - if excludeChars: - initChars = ''.join(c for c in initChars if c not in excludeChars) - if bodyChars: - bodyChars = ''.join(c for c in bodyChars if c not in excludeChars) - self.initCharsOrig = initChars - self.initChars = set(initChars) - if bodyChars : - self.bodyCharsOrig = bodyChars - self.bodyChars = set(bodyChars) - else: - self.bodyCharsOrig = initChars - self.bodyChars = set(initChars) - - self.maxSpecified = max > 0 - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use Optional(Word()) if zero-length word is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.asKeyword = asKeyword - - if ' ' not in self.initCharsOrig+self.bodyCharsOrig and (min==1 and max==0 and exact==0): - if self.bodyCharsOrig == self.initCharsOrig: - self.reString = "[%s]+" % _escapeRegexRangeChars(self.initCharsOrig) - elif len(self.initCharsOrig) == 1: - self.reString = "%s[%s]*" % \ - (re.escape(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - else: - self.reString = "[%s][%s]*" % \ - (_escapeRegexRangeChars(self.initCharsOrig), - _escapeRegexRangeChars(self.bodyCharsOrig),) - if self.asKeyword: - self.reString = r"\b"+self.reString+r"\b" - try: - self.re = re.compile( self.reString ) - except Exception: - self.re = None - - def parseImpl( self, instring, loc, doActions=True ): - if self.re: - result = self.re.match(instring,loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - return loc, result.group() - - if not(instring[ loc ] in self.initChars): - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - instrlen = len(instring) - bodychars = self.bodyChars - maxloc = start + self.maxLen - maxloc = min( maxloc, instrlen ) - while loc < maxloc and instring[loc] in bodychars: - loc += 1 - - throwException = False - if loc - start < self.minLen: - throwException = True - if self.maxSpecified and loc < instrlen and instring[loc] in bodychars: - throwException = True - if self.asKeyword: - if (start>0 and instring[start-1] in bodychars) or (loc<instrlen and instring[loc] in bodychars): - throwException = True - - if throwException: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, 
instring[start:loc] - - def __str__( self ): - try: - return super(Word,self).__str__() - except Exception: - pass - - - if self.strRepr is None: - - def charsAsStr(s): - if len(s)>4: - return s[:4]+"..." - else: - return s - - if ( self.initCharsOrig != self.bodyCharsOrig ): - self.strRepr = "W:(%s,%s)" % ( charsAsStr(self.initCharsOrig), charsAsStr(self.bodyCharsOrig) ) - else: - self.strRepr = "W:(%s)" % charsAsStr(self.initCharsOrig) - - return self.strRepr - - -class Regex(Token): - """ - Token for matching strings that match a given regular expression. - Defined with string specifying the regular expression in a form recognized by the inbuilt Python re module. - If the given regex contains named groups (defined using C{(?P<name>...)}), these will be preserved as - named parse results. - - Example:: - realnum = Regex(r"[+-]?\d+\.\d*") - date = Regex(r'(?P<year>\d{4})-(?P<month>\d\d?)-(?P<day>\d\d?)') - # ref: http://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression - roman = Regex(r"M{0,4}(CM|CD|D?C{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") - """ - compiledREtype = type(re.compile("[A-Z]")) - def __init__( self, pattern, flags=0): - """The parameters C{pattern} and C{flags} are passed to the C{re.compile()} function as-is. See the Python C{re} module for an explanation of the acceptable patterns and flags.""" - super(Regex,self).__init__() - - if isinstance(pattern, basestring): - if not pattern: - warnings.warn("null string passed to Regex; use Empty() instead", - SyntaxWarning, stacklevel=2) - - self.pattern = pattern - self.flags = flags - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % pattern, - SyntaxWarning, stacklevel=2) - raise - - elif isinstance(pattern, Regex.compiledREtype): - self.re = pattern - self.pattern = \ - self.reString = str(pattern) - self.flags = flags - - else: - raise ValueError("Regex may only be constructed with a string or a compiled RE object") - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - result = self.re.match(instring,loc) - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - d = result.groupdict() - ret = ParseResults(result.group()) - if d: - for k in d: - ret[k] = d[k] - return loc,ret - - def __str__( self ): - try: - return super(Regex,self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "Re:(%s)" % repr(self.pattern) - - return self.strRepr - - -class QuotedString(Token): - r""" - Token for matching strings that are delimited by quoting characters. 
- - Defined with the following parameters: - - quoteChar - string of one or more characters defining the quote delimiting string - - escChar - character to escape quotes, typically backslash (default=C{None}) - - escQuote - special quote sequence to escape an embedded quote string (such as SQL's "" to escape an embedded ") (default=C{None}) - - multiline - boolean indicating whether quotes can span multiple lines (default=C{False}) - - unquoteResults - boolean indicating whether the matched text should be unquoted (default=C{True}) - - endQuoteChar - string of one or more characters defining the end of the quote delimited string (default=C{None} => same as quoteChar) - - convertWhitespaceEscapes - convert escaped whitespace (C{'\t'}, C{'\n'}, etc.) to actual whitespace (default=C{True}) - - Example:: - qs = QuotedString('"') - print(qs.searchString('lsjdf "This is the quote" sldjf')) - complex_qs = QuotedString('{{', endQuoteChar='}}') - print(complex_qs.searchString('lsjdf {{This is the "quote"}} sldjf')) - sql_qs = QuotedString('"', escQuote='""') - print(sql_qs.searchString('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) - prints:: - [['This is the quote']] - [['This is the "quote"']] - [['This is the quote with "embedded" quotes']] - """ - def __init__( self, quoteChar, escChar=None, escQuote=None, multiline=False, unquoteResults=True, endQuoteChar=None, convertWhitespaceEscapes=True): - super(QuotedString,self).__init__() - - # remove white space from quote chars - wont work anyway - quoteChar = quoteChar.strip() - if not quoteChar: - warnings.warn("quoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) - raise SyntaxError() - - if endQuoteChar is None: - endQuoteChar = quoteChar - else: - endQuoteChar = endQuoteChar.strip() - if not endQuoteChar: - warnings.warn("endQuoteChar cannot be the empty string",SyntaxWarning,stacklevel=2) - raise SyntaxError() - - self.quoteChar = quoteChar - self.quoteCharLen = len(quoteChar) - self.firstQuoteChar = quoteChar[0] - self.endQuoteChar = endQuoteChar - self.endQuoteCharLen = len(endQuoteChar) - self.escChar = escChar - self.escQuote = escQuote - self.unquoteResults = unquoteResults - self.convertWhitespaceEscapes = convertWhitespaceEscapes - - if multiline: - self.flags = re.MULTILINE | re.DOTALL - self.pattern = r'%s(?:[^%s%s]' % \ - ( re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) - else: - self.flags = 0 - self.pattern = r'%s(?:[^%s\n\r%s]' % \ - ( re.escape(self.quoteChar), - _escapeRegexRangeChars(self.endQuoteChar[0]), - (escChar is not None and _escapeRegexRangeChars(escChar) or '') ) - if len(self.endQuoteChar) > 1: - self.pattern += ( - '|(?:' + ')|(?:'.join("%s[^%s]" % (re.escape(self.endQuoteChar[:i]), - _escapeRegexRangeChars(self.endQuoteChar[i])) - for i in range(len(self.endQuoteChar)-1,0,-1)) + ')' - ) - if escQuote: - self.pattern += (r'|(?:%s)' % re.escape(escQuote)) - if escChar: - self.pattern += (r'|(?:%s.)' % re.escape(escChar)) - self.escCharReplacePattern = re.escape(self.escChar)+"(.)" - self.pattern += (r')*%s' % re.escape(self.endQuoteChar)) - - try: - self.re = re.compile(self.pattern, self.flags) - self.reString = self.pattern - except sre_constants.error: - warnings.warn("invalid pattern (%s) passed to Regex" % self.pattern, - SyntaxWarning, stacklevel=2) - raise - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayIndexError = False - self.mayReturnEmpty = True - - def 
parseImpl( self, instring, loc, doActions=True ): - result = instring[loc] == self.firstQuoteChar and self.re.match(instring,loc) or None - if not result: - raise ParseException(instring, loc, self.errmsg, self) - - loc = result.end() - ret = result.group() - - if self.unquoteResults: - - # strip off quotes - ret = ret[self.quoteCharLen:-self.endQuoteCharLen] - - if isinstance(ret,basestring): - # replace escaped whitespace - if '\\' in ret and self.convertWhitespaceEscapes: - ws_map = { - r'\t' : '\t', - r'\n' : '\n', - r'\f' : '\f', - r'\r' : '\r', - } - for wslit,wschar in ws_map.items(): - ret = ret.replace(wslit, wschar) - - # replace escaped characters - if self.escChar: - ret = re.sub(self.escCharReplacePattern,"\g<1>",ret) - - # replace escaped quotes - if self.escQuote: - ret = ret.replace(self.escQuote, self.endQuoteChar) - - return loc, ret - - def __str__( self ): - try: - return super(QuotedString,self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "quoted string, starting with %s ending with %s" % (self.quoteChar, self.endQuoteChar) - - return self.strRepr - - -class CharsNotIn(Token): - """ - Token for matching words composed of characters I{not} in a given set (will - include whitespace in matched characters if not listed in the provided exclusion set - see example). - Defined with string containing all disallowed characters, and an optional - minimum, maximum, and/or exact length. The default value for C{min} is 1 (a - minimum value < 1 is not valid); the default values for C{max} and C{exact} - are 0, meaning no maximum or exact length restriction. - - Example:: - # define a comma-separated-value as anything that is not a ',' - csv_value = CharsNotIn(',') - print(delimitedList(csv_value).parseString("dkls,lsdkjf,s12 34,@!#,213")) - prints:: - ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] - """ - def __init__( self, notChars, min=1, max=0, exact=0 ): - super(CharsNotIn,self).__init__() - self.skipWhitespace = False - self.notChars = notChars - - if min < 1: - raise ValueError("cannot specify a minimum length < 1; use Optional(CharsNotIn()) if zero-length char group is permitted") - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - self.name = _ustr(self) - self.errmsg = "Expected " + self.name - self.mayReturnEmpty = ( self.minLen == 0 ) - self.mayIndexError = False - - def parseImpl( self, instring, loc, doActions=True ): - if instring[loc] in self.notChars: - raise ParseException(instring, loc, self.errmsg, self) - - start = loc - loc += 1 - notchars = self.notChars - maxlen = min( start+self.maxLen, len(instring) ) - while loc < maxlen and \ - (instring[loc] not in notchars): - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - def __str__( self ): - try: - return super(CharsNotIn, self).__str__() - except Exception: - pass - - if self.strRepr is None: - if len(self.notChars) > 4: - self.strRepr = "!W:(%s...)" % self.notChars[:4] - else: - self.strRepr = "!W:(%s)" % self.notChars - - return self.strRepr - -class White(Token): - """ - Special matching class for matching whitespace. Normally, whitespace is ignored - by pyparsing grammars. This class is included when some whitespace structures - are significant. Define with a string containing the whitespace characters to be - matched; default is C{" \\t\\r\\n"}. 
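-
-    A minimal illustrative sketch (an editorial addition, not part of the original docs)::
-        # make the tab between two fields significant
-        two_cols = Word(alphas) + White("\\t") + Word(alphas)
-        print(two_cols.parseString("key\\tvalue"))  # -> ['key', '\\t', 'value']
-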
Also takes optional C{min}, C{max}, and C{exact} arguments, - as defined for the C{L{Word}} class. - """ - whiteStrs = { - " " : "<SPC>", - "\t": "<TAB>", - "\n": "<LF>", - "\r": "<CR>", - "\f": "<FF>", - } - def __init__(self, ws=" \t\r\n", min=1, max=0, exact=0): - super(White,self).__init__() - self.matchWhite = ws - self.setWhitespaceChars( "".join(c for c in self.whiteChars if c not in self.matchWhite) ) - #~ self.leaveWhitespace() - self.name = ("".join(White.whiteStrs[c] for c in self.matchWhite)) - self.mayReturnEmpty = True - self.errmsg = "Expected " + self.name - - self.minLen = min - - if max > 0: - self.maxLen = max - else: - self.maxLen = _MAX_INT - - if exact > 0: - self.maxLen = exact - self.minLen = exact - - def parseImpl( self, instring, loc, doActions=True ): - if not(instring[ loc ] in self.matchWhite): - raise ParseException(instring, loc, self.errmsg, self) - start = loc - loc += 1 - maxloc = start + self.maxLen - maxloc = min( maxloc, len(instring) ) - while loc < maxloc and instring[loc] in self.matchWhite: - loc += 1 - - if loc - start < self.minLen: - raise ParseException(instring, loc, self.errmsg, self) - - return loc, instring[start:loc] - - -class _PositionToken(Token): - def __init__( self ): - super(_PositionToken,self).__init__() - self.name=self.__class__.__name__ - self.mayReturnEmpty = True - self.mayIndexError = False - -class GoToColumn(_PositionToken): - """ - Token to advance to a specific column of input text; useful for tabular report scraping. - """ - def __init__( self, colno ): - super(GoToColumn,self).__init__() - self.col = colno - - def preParse( self, instring, loc ): - if col(loc,instring) != self.col: - instrlen = len(instring) - if self.ignoreExprs: - loc = self._skipIgnorables( instring, loc ) - while loc < instrlen and instring[loc].isspace() and col( loc, instring ) != self.col : - loc += 1 - return loc - - def parseImpl( self, instring, loc, doActions=True ): - thiscol = col( loc, instring ) - if thiscol > self.col: - raise ParseException( instring, loc, "Text not in expected column", self ) - newloc = loc + self.col - thiscol - ret = instring[ loc: newloc ] - return newloc, ret - - -class LineStart(_PositionToken): - """ - Matches if current position is at the beginning of a line within the parse string - - Example:: - - test = '''\ - AAA this line - AAA and this line - AAA but not this one - B AAA and definitely not this one - ''' - - for t in (LineStart() + 'AAA' + restOfLine).searchString(test): - print(t) - - Prints:: - ['AAA', ' this line'] - ['AAA', ' and this line'] - - """ - def __init__( self ): - super(LineStart,self).__init__() - self.errmsg = "Expected start of line" - - def parseImpl( self, instring, loc, doActions=True ): - if col(loc, instring) == 1: - return loc, [] - raise ParseException(instring, loc, self.errmsg, self) - -class LineEnd(_PositionToken): - """ - Matches if current position is at the end of a line within the parse string - """ - def __init__( self ): - super(LineEnd,self).__init__() - self.setWhitespaceChars( ParserElement.DEFAULT_WHITE_CHARS.replace("\n","") ) - self.errmsg = "Expected end of line" - - def parseImpl( self, instring, loc, doActions=True ): - if loc<len(instring): - if instring[loc] == "\n": - return loc+1, "\n" - else: - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc+1, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - -class StringStart(_PositionToken): - """ - Matches if current position is at the beginning of 
the parse string - """ - def __init__( self ): - super(StringStart,self).__init__() - self.errmsg = "Expected start of text" - - def parseImpl( self, instring, loc, doActions=True ): - if loc != 0: - # see if entire string up to here is just whitespace and ignoreables - if loc != self.preParse( instring, 0 ): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - -class StringEnd(_PositionToken): - """ - Matches if current position is at the end of the parse string - """ - def __init__( self ): - super(StringEnd,self).__init__() - self.errmsg = "Expected end of text" - - def parseImpl( self, instring, loc, doActions=True ): - if loc < len(instring): - raise ParseException(instring, loc, self.errmsg, self) - elif loc == len(instring): - return loc+1, [] - elif loc > len(instring): - return loc, [] - else: - raise ParseException(instring, loc, self.errmsg, self) - -class WordStart(_PositionToken): - """ - Matches if the current position is at the beginning of a Word, and - is not preceded by any character in a given set of C{wordChars} - (default=C{printables}). To emulate the C{\b} behavior of regular expressions, - use C{WordStart(alphanums)}. C{WordStart} will also match at the beginning of - the string being parsed, or at the beginning of a line. - """ - def __init__(self, wordChars = printables): - super(WordStart,self).__init__() - self.wordChars = set(wordChars) - self.errmsg = "Not at the start of a word" - - def parseImpl(self, instring, loc, doActions=True ): - if loc != 0: - if (instring[loc-1] in self.wordChars or - instring[loc] not in self.wordChars): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - -class WordEnd(_PositionToken): - """ - Matches if the current position is at the end of a Word, and - is not followed by any character in a given set of C{wordChars} - (default=C{printables}). To emulate the C{\b} behavior of regular expressions, - use C{WordEnd(alphanums)}. C{WordEnd} will also match at the end of - the string being parsed, or at the end of a line. - """ - def __init__(self, wordChars = printables): - super(WordEnd,self).__init__() - self.wordChars = set(wordChars) - self.skipWhitespace = False - self.errmsg = "Not at the end of a word" - - def parseImpl(self, instring, loc, doActions=True ): - instrlen = len(instring) - if instrlen>0 and loc<instrlen: - if (instring[loc] in self.wordChars or - instring[loc-1] not in self.wordChars): - raise ParseException(instring, loc, self.errmsg, self) - return loc, [] - - -class ParseExpression(ParserElement): - """ - Abstract subclass of ParserElement, for combining and post-processing parsed tokens. 
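-
-    A minimal illustrative sketch (an editorial addition; C{And} and C{MatchFirst} below are
-    concrete subclasses)::
-        seq = And([Word(alphas), Word(nums)])
-        seq.append(Word(alphas))   # extend in place; same as Word(alphas) + Word(nums) + Word(alphas)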
- """ - def __init__( self, exprs, savelist = False ): - super(ParseExpression,self).__init__(savelist) - if isinstance( exprs, _generatorType ): - exprs = list(exprs) - - if isinstance( exprs, basestring ): - self.exprs = [ ParserElement._literalStringClass( exprs ) ] - elif isinstance( exprs, collections.Iterable ): - exprs = list(exprs) - # if sequence of strings provided, wrap with Literal - if all(isinstance(expr, basestring) for expr in exprs): - exprs = map(ParserElement._literalStringClass, exprs) - self.exprs = list(exprs) - else: - try: - self.exprs = list( exprs ) - except TypeError: - self.exprs = [ exprs ] - self.callPreparse = False - - def __getitem__( self, i ): - return self.exprs[i] - - def append( self, other ): - self.exprs.append( other ) - self.strRepr = None - return self - - def leaveWhitespace( self ): - """Extends C{leaveWhitespace} defined in base class, and also invokes C{leaveWhitespace} on - all contained expressions.""" - self.skipWhitespace = False - self.exprs = [ e.copy() for e in self.exprs ] - for e in self.exprs: - e.leaveWhitespace() - return self - - def ignore( self, other ): - if isinstance( other, Suppress ): - if other not in self.ignoreExprs: - super( ParseExpression, self).ignore( other ) - for e in self.exprs: - e.ignore( self.ignoreExprs[-1] ) - else: - super( ParseExpression, self).ignore( other ) - for e in self.exprs: - e.ignore( self.ignoreExprs[-1] ) - return self - - def __str__( self ): - try: - return super(ParseExpression,self).__str__() - except Exception: - pass - - if self.strRepr is None: - self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.exprs) ) - return self.strRepr - - def streamline( self ): - super(ParseExpression,self).streamline() - - for e in self.exprs: - e.streamline() - - # collapse nested And's of the form And( And( And( a,b), c), d) to And( a,b,c,d ) - # but only if there are no parse actions or resultsNames on the nested And's - # (likewise for Or's and MatchFirst's) - if ( len(self.exprs) == 2 ): - other = self.exprs[0] - if ( isinstance( other, self.__class__ ) and - not(other.parseAction) and - other.resultsName is None and - not other.debug ): - self.exprs = other.exprs[:] + [ self.exprs[1] ] - self.strRepr = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - other = self.exprs[-1] - if ( isinstance( other, self.__class__ ) and - not(other.parseAction) and - other.resultsName is None and - not other.debug ): - self.exprs = self.exprs[:-1] + other.exprs[:] - self.strRepr = None - self.mayReturnEmpty |= other.mayReturnEmpty - self.mayIndexError |= other.mayIndexError - - self.errmsg = "Expected " + _ustr(self) - - return self - - def setResultsName( self, name, listAllMatches=False ): - ret = super(ParseExpression,self).setResultsName(name,listAllMatches) - return ret - - def validate( self, validateTrace=[] ): - tmp = validateTrace[:]+[self] - for e in self.exprs: - e.validate(tmp) - self.checkRecursion( [] ) - - def copy(self): - ret = super(ParseExpression,self).copy() - ret.exprs = [e.copy() for e in self.exprs] - return ret - -class And(ParseExpression): - """ - Requires all given C{ParseExpression}s to be found in the given order. - Expressions may be separated by whitespace. - May be constructed using the C{'+'} operator. - May also be constructed using the C{'-'} operator, which will suppress backtracking. 
- - Example:: - integer = Word(nums) - name_expr = OneOrMore(Word(alphas)) - - expr = And([integer("id"),name_expr("name"),integer("age")]) - # more easily written as: - expr = integer("id") + name_expr("name") + integer("age") - """ - - class _ErrorStop(Empty): - def __init__(self, *args, **kwargs): - super(And._ErrorStop,self).__init__(*args, **kwargs) - self.name = '-' - self.leaveWhitespace() - - def __init__( self, exprs, savelist = True ): - super(And,self).__init__(exprs, savelist) - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.setWhitespaceChars( self.exprs[0].whiteChars ) - self.skipWhitespace = self.exprs[0].skipWhitespace - self.callPreparse = True - - def parseImpl( self, instring, loc, doActions=True ): - # pass False as last arg to _parse for first element, since we already - # pre-parsed the string as part of our And pre-parsing - loc, resultlist = self.exprs[0]._parse( instring, loc, doActions, callPreParse=False ) - errorStop = False - for e in self.exprs[1:]: - if isinstance(e, And._ErrorStop): - errorStop = True - continue - if errorStop: - try: - loc, exprtokens = e._parse( instring, loc, doActions ) - except ParseSyntaxException: - raise - except ParseBaseException as pe: - pe.__traceback__ = None - raise ParseSyntaxException._from_exception(pe) - except IndexError: - raise ParseSyntaxException(instring, len(instring), self.errmsg, self) - else: - loc, exprtokens = e._parse( instring, loc, doActions ) - if exprtokens or exprtokens.haskeys(): - resultlist += exprtokens - return loc, resultlist - - def __iadd__(self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - return self.append( other ) #And( [ self, other ] ) - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - if not e.mayReturnEmpty: - break - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - -class Or(ParseExpression): - """ - Requires that at least one C{ParseExpression} is found. - If two expressions match, the expression that matches the longest string will be used. - May be constructed using the C{'^'} operator. - - Example:: - # construct Or using '^' operator - - number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) - print(number.searchString("123 3.1416 789")) - prints:: - [['123'], ['3.1416'], ['789']] - """ - def __init__( self, exprs, savelist = False ): - super(Or,self).__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - maxExcLoc = -1 - maxException = None - matches = [] - for e in self.exprs: - try: - loc2 = e.tryParse( instring, loc ) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring,len(instring),e.errmsg,self) - maxExcLoc = len(instring) - else: - # save match among all matches, to retry longest to shortest - matches.append((loc2, e)) - - if matches: - matches.sort(key=lambda x: -x[0]) - for _,e in matches: - try: - return e._parse( instring, loc, doActions ) - except ParseException as err: - err.__traceback__ = None - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - - def __ixor__(self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - return self.append( other ) #Or( [ self, other ] ) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " ^ ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class MatchFirst(ParseExpression): - """ - Requires that at least one C{ParseExpression} is found. - If two expressions match, the first one listed is the one that will match. - May be constructed using the C{'|'} operator. - - Example:: - # construct MatchFirst using '|' operator - - # watch the order of expressions to match - number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) - print(number.searchString("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] - - # put more selective expression first - number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) - print(number.searchString("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] - """ - def __init__( self, exprs, savelist = False ): - super(MatchFirst,self).__init__(exprs, savelist) - if self.exprs: - self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) - else: - self.mayReturnEmpty = True - - def parseImpl( self, instring, loc, doActions=True ): - maxExcLoc = -1 - maxException = None - for e in self.exprs: - try: - ret = e._parse( instring, loc, doActions ) - return ret - except ParseException as err: - if err.loc > maxExcLoc: - maxException = err - maxExcLoc = err.loc - except IndexError: - if len(instring) > maxExcLoc: - maxException = ParseException(instring,len(instring),e.errmsg,self) - maxExcLoc = len(instring) - - # only got here if no expression matched, raise exception for match that made it the furthest - else: - if maxException is not None: - maxException.msg = self.errmsg - raise maxException - else: - raise ParseException(instring, loc, "no defined alternatives to match", self) - - def __ior__(self, other ): - if isinstance( other, basestring ): - other = ParserElement._literalStringClass( other ) - return self.append( other ) #MatchFirst( [ self, other ] ) - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " | ".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class Each(ParseExpression): - """ - Requires all given C{ParseExpression}s to be found, but in any order. - Expressions may be separated by whitespace. - May be constructed using the C{'&'} operator. 
- - Example:: - color = oneOf("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") - shape_type = oneOf("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") - integer = Word(nums) - shape_attr = "shape:" + shape_type("shape") - posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") - color_attr = "color:" + color("color") - size_attr = "size:" + integer("size") - - # use Each (using operator '&') to accept attributes in any order - # (shape and posn are required, color and size are optional) - shape_spec = shape_attr & posn_attr & Optional(color_attr) & Optional(size_attr) - - shape_spec.runTests(''' - shape: SQUARE color: BLACK posn: 100, 120 - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - color:GREEN size:20 shape:TRIANGLE posn:20,40 - ''' - ) - prints:: - shape: SQUARE color: BLACK posn: 100, 120 - ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] - - color: BLACK - - posn: ['100', ',', '120'] - - x: 100 - - y: 120 - - shape: SQUARE - - - shape: CIRCLE size: 50 color: BLUE posn: 50,80 - ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] - - color: BLUE - - posn: ['50', ',', '80'] - - x: 50 - - y: 80 - - shape: CIRCLE - - size: 50 - - - color: GREEN size: 20 shape: TRIANGLE posn: 20,40 - ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] - - color: GREEN - - posn: ['20', ',', '40'] - - x: 20 - - y: 40 - - shape: TRIANGLE - - size: 20 - """ - def __init__( self, exprs, savelist = True ): - super(Each,self).__init__(exprs, savelist) - self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) - self.skipWhitespace = True - self.initExprGroups = True - - def parseImpl( self, instring, loc, doActions=True ): - if self.initExprGroups: - self.opt1map = dict((id(e.expr),e) for e in self.exprs if isinstance(e,Optional)) - opt1 = [ e.expr for e in self.exprs if isinstance(e,Optional) ] - opt2 = [ e for e in self.exprs if e.mayReturnEmpty and not isinstance(e,Optional)] - self.optionals = opt1 + opt2 - self.multioptionals = [ e.expr for e in self.exprs if isinstance(e,ZeroOrMore) ] - self.multirequired = [ e.expr for e in self.exprs if isinstance(e,OneOrMore) ] - self.required = [ e for e in self.exprs if not isinstance(e,(Optional,ZeroOrMore,OneOrMore)) ] - self.required += self.multirequired - self.initExprGroups = False - tmpLoc = loc - tmpReqd = self.required[:] - tmpOpt = self.optionals[:] - matchOrder = [] - - keepMatching = True - while keepMatching: - tmpExprs = tmpReqd + tmpOpt + self.multioptionals + self.multirequired - failed = [] - for e in tmpExprs: - try: - tmpLoc = e.tryParse( instring, tmpLoc ) - except ParseException: - failed.append(e) - else: - matchOrder.append(self.opt1map.get(id(e),e)) - if e in tmpReqd: - tmpReqd.remove(e) - elif e in tmpOpt: - tmpOpt.remove(e) - if len(failed) == len(tmpExprs): - keepMatching = False - - if tmpReqd: - missing = ", ".join(_ustr(e) for e in tmpReqd) - raise ParseException(instring,loc,"Missing one or more required elements (%s)" % missing ) - - # add any unmatched Optionals, in case they have default values defined - matchOrder += [e for e in self.exprs if isinstance(e,Optional) and e.expr in tmpOpt] - - resultlist = [] - for e in matchOrder: - loc,results = e._parse(instring,loc,doActions) - resultlist.append(results) - - finalResults = sum(resultlist, ParseResults([])) - return loc, finalResults - - def __str__( self ): - if hasattr(self,"name"): - return self.name - - if self.strRepr is None: - self.strRepr = "{" + " & 
".join(_ustr(e) for e in self.exprs) + "}" - - return self.strRepr - - def checkRecursion( self, parseElementList ): - subRecCheckList = parseElementList[:] + [ self ] - for e in self.exprs: - e.checkRecursion( subRecCheckList ) - - -class ParseElementEnhance(ParserElement): - """ - Abstract subclass of C{ParserElement}, for combining and post-processing parsed tokens. - """ - def __init__( self, expr, savelist=False ): - super(ParseElementEnhance,self).__init__(savelist) - if isinstance( expr, basestring ): - if issubclass(ParserElement._literalStringClass, Token): - expr = ParserElement._literalStringClass(expr) - else: - expr = ParserElement._literalStringClass(Literal(expr)) - self.expr = expr - self.strRepr = None - if expr is not None: - self.mayIndexError = expr.mayIndexError - self.mayReturnEmpty = expr.mayReturnEmpty - self.setWhitespaceChars( expr.whiteChars ) - self.skipWhitespace = expr.skipWhitespace - self.saveAsList = expr.saveAsList - self.callPreparse = expr.callPreparse - self.ignoreExprs.extend(expr.ignoreExprs) - - def parseImpl( self, instring, loc, doActions=True ): - if self.expr is not None: - return self.expr._parse( instring, loc, doActions, callPreParse=False ) - else: - raise ParseException("",loc,self.errmsg,self) - - def leaveWhitespace( self ): - self.skipWhitespace = False - self.expr = self.expr.copy() - if self.expr is not None: - self.expr.leaveWhitespace() - return self - - def ignore( self, other ): - if isinstance( other, Suppress ): - if other not in self.ignoreExprs: - super( ParseElementEnhance, self).ignore( other ) - if self.expr is not None: - self.expr.ignore( self.ignoreExprs[-1] ) - else: - super( ParseElementEnhance, self).ignore( other ) - if self.expr is not None: - self.expr.ignore( self.ignoreExprs[-1] ) - return self - - def streamline( self ): - super(ParseElementEnhance,self).streamline() - if self.expr is not None: - self.expr.streamline() - return self - - def checkRecursion( self, parseElementList ): - if self in parseElementList: - raise RecursiveGrammarException( parseElementList+[self] ) - subRecCheckList = parseElementList[:] + [ self ] - if self.expr is not None: - self.expr.checkRecursion( subRecCheckList ) - - def validate( self, validateTrace=[] ): - tmp = validateTrace[:]+[self] - if self.expr is not None: - self.expr.validate(tmp) - self.checkRecursion( [] ) - - def __str__( self ): - try: - return super(ParseElementEnhance,self).__str__() - except Exception: - pass - - if self.strRepr is None and self.expr is not None: - self.strRepr = "%s:(%s)" % ( self.__class__.__name__, _ustr(self.expr) ) - return self.strRepr - - -class FollowedBy(ParseElementEnhance): - """ - Lookahead matching of the given parse expression. C{FollowedBy} - does I{not} advance the parsing position within the input string, it only - verifies that the specified parse expression matches at the current - position. C{FollowedBy} always returns a null token list. 
-
-    Example::
-        # use FollowedBy to match a label only if it is followed by a ':'
-        data_word = Word(alphas)
-        label = data_word + FollowedBy(':')
-        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-
-        OneOrMore(attr_expr).parseString("shape: SQUARE color: BLACK posn: upper left").pprint()
-    prints::
-        [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']]
-    """
-    def __init__( self, expr ):
-        super(FollowedBy,self).__init__(expr)
-        self.mayReturnEmpty = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        self.expr.tryParse( instring, loc )
-        return loc, []
-
-
-class NotAny(ParseElementEnhance):
-    """
-    Lookahead to disallow matching with the given parse expression.  C{NotAny}
-    does I{not} advance the parsing position within the input string, it only
-    verifies that the specified parse expression does I{not} match at the current
-    position.  Also, C{NotAny} does I{not} skip over leading whitespace. C{NotAny}
-    always returns a null token list.  May be constructed using the '~' operator.
-
-    Example::
-        AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split())
-
-        # take care not to mistake keywords for identifiers
-        ident = ~(AND | OR | NOT) + Word(alphas)
-    """
-    def __init__( self, expr ):
-        super(NotAny,self).__init__(expr)
-        #~ self.leaveWhitespace()
-        self.skipWhitespace = False  # do NOT use self.leaveWhitespace(), don't want to propagate to exprs
-        self.mayReturnEmpty = True
-        self.errmsg = "Found unwanted token, "+_ustr(self.expr)
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        if self.expr.canParseNext(instring, loc):
-            raise ParseException(instring, loc, self.errmsg, self)
-        return loc, []
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "~{" + _ustr(self.expr) + "}"
-
-        return self.strRepr
-
-class _MultipleMatch(ParseElementEnhance):
-    def __init__( self, expr, stopOn=None):
-        super(_MultipleMatch, self).__init__(expr)
-        self.saveAsList = True
-        ender = stopOn
-        if isinstance(ender, basestring):
-            ender = ParserElement._literalStringClass(ender)
-        self.not_ender = ~ender if ender is not None else None
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        self_expr_parse = self.expr._parse
-        self_skip_ignorables = self._skipIgnorables
-        check_ender = self.not_ender is not None
-        if check_ender:
-            try_not_ender = self.not_ender.tryParse
-
-        # must be at least one (but first see if we are the stopOn sentinel;
-        # if so, fail)
-        if check_ender:
-            try_not_ender(instring, loc)
-        loc, tokens = self_expr_parse( instring, loc, doActions, callPreParse=False )
-        try:
-            hasIgnoreExprs = (not not self.ignoreExprs)
-            while 1:
-                if check_ender:
-                    try_not_ender(instring, loc)
-                if hasIgnoreExprs:
-                    preloc = self_skip_ignorables( instring, loc )
-                else:
-                    preloc = loc
-                loc, tmptokens = self_expr_parse( instring, preloc, doActions )
-                if tmptokens or tmptokens.haskeys():
-                    tokens += tmptokens
-        except (ParseException,IndexError):
-            pass
-
-        return loc, tokens
-
-class OneOrMore(_MultipleMatch):
-    """
-    Repetition of one or more of the given expression.
-
-    Parameters:
-     - expr - expression that must match one or more times
-     - stopOn - (default=C{None}) - expression for a terminating sentinel
-          (only required if the sentinel would ordinarily match the repetition
-          expression)
-
-    Example::
-        data_word = Word(alphas)
-        label = data_word + FollowedBy(':')
-        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
-
-        text = "shape: SQUARE posn: upper left color: BLACK"
-        OneOrMore(attr_expr).parseString(text).pprint()  # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']]
-
-        # use stopOn attribute for OneOrMore to avoid reading label string as part of the data
-        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-        OneOrMore(attr_expr).parseString(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']]
-
-        # could also be written as
-        (attr_expr * (1,)).parseString(text).pprint()
-    """
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "{" + _ustr(self.expr) + "}..."
-
-        return self.strRepr
-
-class ZeroOrMore(_MultipleMatch):
-    """
-    Optional repetition of zero or more of the given expression.
-
-    Parameters:
-     - expr - expression that must match zero or more times
-     - stopOn - (default=C{None}) - expression for a terminating sentinel
-          (only required if the sentinel would ordinarily match the repetition
-          expression)
-
-    Example: similar to L{OneOrMore}
-    """
-    def __init__( self, expr, stopOn=None):
-        super(ZeroOrMore,self).__init__(expr, stopOn=stopOn)
-        self.mayReturnEmpty = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        try:
-            return super(ZeroOrMore, self).parseImpl(instring, loc, doActions)
-        except (ParseException,IndexError):
-            return loc, []
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "[" + _ustr(self.expr) + "]..."
-
-        return self.strRepr
-
-class _NullToken(object):
-    def __bool__(self):
-        return False
-    __nonzero__ = __bool__
-    def __str__(self):
-        return ""
-
-_optionalNotMatched = _NullToken()
-class Optional(ParseElementEnhance):
-    """
-    Optional matching of the given expression.
-
-    Parameters:
-     - expr - expression that may match zero or one time
-     - default (optional) - value to be returned if the optional expression is not found.
-
-    Example::
-        # US postal code can be a 5-digit zip, plus optional 4-digit qualifier
-        zip = Combine(Word(nums, exact=5) + Optional('-' + Word(nums, exact=4)))
-        zip.runTests('''
-            # traditional ZIP code
-            12345
-
-            # ZIP+4 form
-            12101-0001
-
-            # invalid ZIP
-            98765-
-            ''')
-    prints::
-        # traditional ZIP code
-        12345
-        ['12345']
-
-        # ZIP+4 form
-        12101-0001
-        ['12101-0001']
-
-        # invalid ZIP
-        98765-
-             ^
-        FAIL: Expected end of text (at char 5), (line:1, col:6)
-    """
-    def __init__( self, expr, default=_optionalNotMatched ):
-        super(Optional,self).__init__( expr, savelist=False )
-        self.saveAsList = self.expr.saveAsList
-        self.defaultValue = default
-        self.mayReturnEmpty = True
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        try:
-            loc, tokens = self.expr._parse( instring, loc, doActions, callPreParse=False )
-        except (ParseException,IndexError):
-            if self.defaultValue is not _optionalNotMatched:
-                if self.expr.resultsName:
-                    tokens = ParseResults([ self.defaultValue ])
-                    tokens[self.expr.resultsName] = self.defaultValue
-                else:
-                    tokens = [ self.defaultValue ]
-            else:
-                tokens = []
-        return loc, tokens
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-
-        if self.strRepr is None:
-            self.strRepr = "[" + _ustr(self.expr) + "]"
-
-        return self.strRepr
-
-class SkipTo(ParseElementEnhance):
-    """
-    Token for skipping over all undefined text until the matched expression is found.
-
-    Parameters:
-     - expr - target expression marking the end of the data to be skipped
-     - include - (default=C{False}) if True, the target expression is also parsed
-          (the skipped text and target expression are returned as a 2-element list).
-     - ignore - (default=C{None}) used to define grammars (typically quoted strings and
-          comments) that might contain false matches to the target expression
-     - failOn - (default=C{None}) define expressions that are not allowed to be
-          included in the skipped text; if found before the target expression is found,
-          the SkipTo is not a match
-
-    Example::
-        report = '''
-            Outstanding Issues Report - 1 Jan 2000
-
-               # | Severity | Description                               |  Days Open
-            -----+----------+-------------------------------------------+-----------
-             101 | Critical | Intermittent system crash                 |          6
-              94 | Cosmetic | Spelling error on Login ('log|n')         |         14
-              79 | Minor    | System slow when running too many reports |         47
-            '''
-        integer = Word(nums)
-        SEP = Suppress('|')
-        # use SkipTo to simply match everything up until the next SEP
-        # - ignore quoted strings, so that a '|' character inside a quoted string does not match
-        # - parse action will call token.strip() for each matched token, i.e., the description body
-        string_data = SkipTo(SEP, ignore=quotedString)
-        string_data.setParseAction(tokenMap(str.strip))
-        ticket_expr = (integer("issue_num") + SEP
-                      + string_data("sev") + SEP
-                      + string_data("desc") + SEP
-                      + integer("days_open"))
-
-        for tkt in ticket_expr.searchString(report):
-            print(tkt.dump())
-    prints::
-        ['101', 'Critical', 'Intermittent system crash', '6']
-        - days_open: 6
-        - desc: Intermittent system crash
-        - issue_num: 101
-        - sev: Critical
-        ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14']
-        - days_open: 14
-        - desc: Spelling error on Login ('log|n')
-        - issue_num: 94
-        - sev: Cosmetic
-        ['79', 'Minor', 'System slow when running too many reports', '47']
-        - days_open: 47
-        - desc: System slow when running too many reports
-        - issue_num: 79
-        - sev: Minor
-    """
-    def __init__( self, other, include=False, ignore=None, failOn=None ):
-        super( SkipTo, self ).__init__( other )
-        self.ignoreExpr = ignore
-        self.mayReturnEmpty = True
-        self.mayIndexError = False
-        self.includeMatch = include
-        self.asList = False
-        if isinstance(failOn, basestring):
-            self.failOn = ParserElement._literalStringClass(failOn)
-        else:
-            self.failOn = failOn
-        self.errmsg = "No match found for "+_ustr(self.expr)
-
-    def parseImpl( self, instring, loc, doActions=True ):
-        startloc = loc
-        instrlen = len(instring)
-        expr = self.expr
-        expr_parse = self.expr._parse
-        self_failOn_canParseNext = self.failOn.canParseNext if self.failOn is not None else None
-        self_ignoreExpr_tryParse = self.ignoreExpr.tryParse if self.ignoreExpr is not None else None
-
-        tmploc = loc
-        while tmploc <= instrlen:
-            if self_failOn_canParseNext is not None:
-                # break if failOn expression matches
-                if self_failOn_canParseNext(instring, tmploc):
-                    break
-
-            if self_ignoreExpr_tryParse is not None:
-                # advance past ignore expressions
-                while 1:
-                    try:
-                        tmploc = self_ignoreExpr_tryParse(instring, tmploc)
-                    except ParseBaseException:
-                        break
-
-            try:
-                expr_parse(instring, tmploc, doActions=False, callPreParse=False)
-            except (ParseException, IndexError):
-                # no match, advance loc in string
-                tmploc += 1
-            else:
-                # matched skipto expr, done
-                break
-
-        else:
-            # ran off the end of the input string without matching skipto expr, fail
-            raise ParseException(instring, loc, self.errmsg, self)
-
-        # build up return values
-        loc = tmploc
-        skiptext = instring[startloc:loc]
-        skipresult = ParseResults(skiptext)
-
-        if self.includeMatch:
-            loc, mat = expr_parse(instring,loc,doActions,callPreParse=False)
-            skipresult += mat
-
-        return loc, skipresult
-
-class Forward(ParseElementEnhance):
-    """
-    Forward declaration of an expression to be defined later -
-    used for recursive grammars, such as algebraic infix notation.
-    When the expression is known, it is assigned to the C{Forward} variable using the '<<' operator.
-
-    Note: take care when assigning to C{Forward} not to overlook precedence of operators.
-    Specifically, '|' has a lower precedence than '<<', so that::
-        fwdExpr << a | b | c
-    will actually be evaluated as::
-        (fwdExpr << a) | b | c
-    thereby leaving b and c out as parseable alternatives.  It is recommended that you
-    explicitly group the values inserted into the C{Forward}::
-        fwdExpr << (a | b | c)
-    Converting to use the '<<=' operator instead will avoid this problem.
-
-    See L{ParseResults.pprint} for an example of a recursive parser created using
-    C{Forward}.
-    """
-    def __init__( self, other=None ):
-        super(Forward,self).__init__( other, savelist=False )
-
-    def __lshift__( self, other ):
-        if isinstance( other, basestring ):
-            other = ParserElement._literalStringClass(other)
-        self.expr = other
-        self.strRepr = None
-        self.mayIndexError = self.expr.mayIndexError
-        self.mayReturnEmpty = self.expr.mayReturnEmpty
-        self.setWhitespaceChars( self.expr.whiteChars )
-        self.skipWhitespace = self.expr.skipWhitespace
-        self.saveAsList = self.expr.saveAsList
-        self.ignoreExprs.extend(self.expr.ignoreExprs)
-        return self
-
-    def __ilshift__(self, other):
-        return self << other
-
-    def leaveWhitespace( self ):
-        self.skipWhitespace = False
-        return self
-
-    def streamline( self ):
-        if not self.streamlined:
-            self.streamlined = True
-            if self.expr is not None:
-                self.expr.streamline()
-        return self
-
-    def validate( self, validateTrace=[] ):
-        if self not in validateTrace:
-            tmp = validateTrace[:]+[self]
-            if self.expr is not None:
-                self.expr.validate(tmp)
-        self.checkRecursion([])
-
-    def __str__( self ):
-        if hasattr(self,"name"):
-            return self.name
-        return self.__class__.__name__ + ": ..."
-
-        # stubbed out for now - creates awful memory and perf issues
-        self._revertClass = self.__class__
-        self.__class__ = _ForwardNoRecurse
-        try:
-            if self.expr is not None:
-                retString = _ustr(self.expr)
-            else:
-                retString = "None"
-        finally:
-            self.__class__ = self._revertClass
-        return self.__class__.__name__ + ": " + retString
-
-    def copy(self):
-        if self.expr is not None:
-            return super(Forward,self).copy()
-        else:
-            ret = Forward()
-            ret <<= self
-            return ret
-
-class _ForwardNoRecurse(Forward):
-    def __str__( self ):
-        return "..."
-
-class TokenConverter(ParseElementEnhance):
-    """
-    Abstract subclass of C{ParseElementEnhance}, for converting parsed results.
-    """
-    def __init__( self, expr, savelist=False ):
-        super(TokenConverter,self).__init__( expr )#, savelist )
-        self.saveAsList = False
-
-class Combine(TokenConverter):
-    """
-    Converter to concatenate all matching tokens to a single string.
-    By default, the matching patterns must also be contiguous in the input string;
-    this can be disabled by specifying C{'adjacent=False'} in the constructor.
-
-    Example::
-        real = Word(nums) + '.' + Word(nums)
-        print(real.parseString('3.1416')) # -> ['3', '.', '1416']
-        # will also erroneously match the following
-        print(real.parseString('3. 1416')) # -> ['3', '.', '1416']
-
-        real = Combine(Word(nums) + '.' + Word(nums))
-        print(real.parseString('3.1416')) # -> ['3.1416']
-        # no match when there are internal spaces
-        print(real.parseString('3. 1416')) # -> Exception: Expected W:(0123...)
-    """
-    def __init__( self, expr, joinString="", adjacent=True ):
-        super(Combine,self).__init__( expr )
-        # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself
-        if adjacent:
-            self.leaveWhitespace()
-        self.adjacent = adjacent
-        self.skipWhitespace = True
-        self.joinString = joinString
-        self.callPreparse = True
-
-    def ignore( self, other ):
-        if self.adjacent:
-            ParserElement.ignore(self, other)
-        else:
-            super( Combine, self).ignore( other )
-        return self
-
-    def postParse( self, instring, loc, tokenlist ):
-        retToks = tokenlist.copy()
-        del retToks[:]
-        retToks += ParseResults([ "".join(tokenlist._asStringList(self.joinString)) ], modal=self.modalResults)
-
-        if self.resultsName and retToks.haskeys():
-            return [ retToks ]
-        else:
-            return retToks
-
-class Group(TokenConverter):
-    """
-    Converter to return the matched tokens as a list - useful for returning tokens of C{L{ZeroOrMore}} and C{L{OneOrMore}} expressions.
-
-    Example::
-        ident = Word(alphas)
-        num = Word(nums)
-        term = ident | num
-        func = ident + Optional(delimitedList(term))
-        print(func.parseString("fn a,b,100"))  # -> ['fn', 'a', 'b', '100']
-
-        func = ident + Group(Optional(delimitedList(term)))
-        print(func.parseString("fn a,b,100"))  # -> ['fn', ['a', 'b', '100']]
-    """
-    def __init__( self, expr ):
-        super(Group,self).__init__( expr )
-        self.saveAsList = True
-
-    def postParse( self, instring, loc, tokenlist ):
-        return [ tokenlist ]
-
-class Dict(TokenConverter):
-    """
-    Converter to return a repetitive expression as a list, but also as a dictionary.
-    Each element can also be referenced using the first token in the expression as its key.
-    Useful for tabular report scraping when the first column can be used as an item key.
-
-    Example::
-        data_word = Word(alphas)
-        label = data_word + FollowedBy(':')
-        attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).setParseAction(' '.join))
-
-        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
-        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-
-        # print attributes as plain groups
-        print(OneOrMore(attr_expr).parseString(text).dump())
-
-        # instead of OneOrMore(expr), parse using Dict(OneOrMore(Group(expr))) - Dict will auto-assign names
-        result = Dict(OneOrMore(Group(attr_expr))).parseString(text)
-        print(result.dump())
-
-        # access named fields as dict entries, or output as dict
-        print(result['shape'])
-        print(result.asDict())
-    prints::
-        ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap']
-
-        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
-        - color: light blue
-        - posn: upper left
-        - shape: SQUARE
-        - texture: burlap
-        SQUARE
-        {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'}
-    See more examples at L{ParseResults} of accessing fields by results name.
- """ - def __init__( self, expr ): - super(Dict,self).__init__( expr ) - self.saveAsList = True - - def postParse( self, instring, loc, tokenlist ): - for i,tok in enumerate(tokenlist): - if len(tok) == 0: - continue - ikey = tok[0] - if isinstance(ikey,int): - ikey = _ustr(tok[0]).strip() - if len(tok)==1: - tokenlist[ikey] = _ParseResultsWithOffset("",i) - elif len(tok)==2 and not isinstance(tok[1],ParseResults): - tokenlist[ikey] = _ParseResultsWithOffset(tok[1],i) - else: - dictvalue = tok.copy() #ParseResults(i) - del dictvalue[0] - if len(dictvalue)!= 1 or (isinstance(dictvalue,ParseResults) and dictvalue.haskeys()): - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue,i) - else: - tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0],i) - - if self.resultsName: - return [ tokenlist ] - else: - return tokenlist - - -class Suppress(TokenConverter): - """ - Converter for ignoring the results of a parsed expression. - - Example:: - source = "a, b, c,d" - wd = Word(alphas) - wd_list1 = wd + ZeroOrMore(',' + wd) - print(wd_list1.parseString(source)) - - # often, delimiters that are useful during parsing are just in the - # way afterward - use Suppress to keep them out of the parsed output - wd_list2 = wd + ZeroOrMore(Suppress(',') + wd) - print(wd_list2.parseString(source)) - prints:: - ['a', ',', 'b', ',', 'c', ',', 'd'] - ['a', 'b', 'c', 'd'] - (See also L{delimitedList}.) - """ - def postParse( self, instring, loc, tokenlist ): - return [] - - def suppress( self ): - return self - - -class OnlyOnce(object): - """ - Wrapper for parse actions, to ensure they are only called once. - """ - def __init__(self, methodCall): - self.callable = _trim_arity(methodCall) - self.called = False - def __call__(self,s,l,t): - if not self.called: - results = self.callable(s,l,t) - self.called = True - return results - raise ParseException(s,l,"") - def reset(self): - self.called = False - -def traceParseAction(f): - """ - Decorator for debugging parse actions. - - When the parse action is called, this decorator will print C{">> entering I{method-name}(line:I{current_source_line}, I{parse_location}, I{matched_tokens})".} - When the parse action completes, the decorator will print C{"<<"} followed by the returned value, or any exception that the parse action raised. - - Example:: - wd = Word(alphas) - - @traceParseAction - def remove_duplicate_chars(tokens): - return ''.join(sorted(set(''.join(tokens))) - - wds = OneOrMore(wd).setParseAction(remove_duplicate_chars) - print(wds.parseString("slkdjs sld sldd sdlf sdljf")) - prints:: - >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {})) - <<leaving remove_duplicate_chars (ret: 'dfjkls') - ['dfjkls'] - """ - f = _trim_arity(f) - def z(*paArgs): - thisFunc = f.__name__ - s,l,t = paArgs[-3:] - if len(paArgs)>3: - thisFunc = paArgs[0].__class__.__name__ + '.' + thisFunc - sys.stderr.write( ">>entering %s(line: '%s', %d, %r)\n" % (thisFunc,line(l,s),l,t) ) - try: - ret = f(*paArgs) - except Exception as exc: - sys.stderr.write( "<<leaving %s (exception: %s)\n" % (thisFunc,exc) ) - raise - sys.stderr.write( "<<leaving %s (ret: %r)\n" % (thisFunc,ret) ) - return ret - try: - z.__name__ = f.__name__ - except AttributeError: - pass - return z - -# -# global helpers -# -def delimitedList( expr, delim=",", combine=False ): - """ - Helper to define a delimited list of expressions - the delimiter defaults to ','. 
-    By default, the list elements and delimiters can have intervening whitespace, and
-    comments, but this can be overridden by passing C{combine=True} in the constructor.
-    If C{combine} is set to C{True}, the matching tokens are returned as a single token
-    string, with the delimiters included; otherwise, the matching tokens are returned
-    as a list of tokens, with the delimiters suppressed.
-
-    Example::
-        delimitedList(Word(alphas)).parseString("aa,bb,cc") # -> ['aa', 'bb', 'cc']
-        delimitedList(Word(hexnums), delim=':', combine=True).parseString("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE']
-    """
-    dlName = _ustr(expr)+" ["+_ustr(delim)+" "+_ustr(expr)+"]..."
-    if combine:
-        return Combine( expr + ZeroOrMore( delim + expr ) ).setName(dlName)
-    else:
-        return ( expr + ZeroOrMore( Suppress( delim ) + expr ) ).setName(dlName)
-
-def countedArray( expr, intExpr=None ):
-    """
-    Helper to define a counted list of expressions.
-    This helper defines a pattern of the form::
-        integer expr expr expr...
-    where the leading integer tells how many expr expressions follow.
-    The matched tokens are returned as a list of the expr tokens - the leading count token is suppressed.
-
-    If C{intExpr} is specified, it should be a pyparsing expression that produces an integer value.
-
-    Example::
-        countedArray(Word(alphas)).parseString('2 ab cd ef')  # -> ['ab', 'cd']
-
-        # in this parser, the leading integer value is given in binary,
-        # '10' indicating that 2 values are in the array
-        binaryConstant = Word('01').setParseAction(lambda t: int(t[0], 2))
-        countedArray(Word(alphas), intExpr=binaryConstant).parseString('10 ab cd ef')  # -> ['ab', 'cd']
-    """
-    arrayExpr = Forward()
-    def countFieldParseAction(s,l,t):
-        n = t[0]
-        arrayExpr << (n and Group(And([expr]*n)) or Group(empty))
-        return []
-    if intExpr is None:
-        intExpr = Word(nums).setParseAction(lambda t:int(t[0]))
-    else:
-        intExpr = intExpr.copy()
-    intExpr.setName("arrayLen")
-    intExpr.addParseAction(countFieldParseAction, callDuringTry=True)
-    return ( intExpr + arrayExpr ).setName('(len) ' + _ustr(expr) + '...')
-
-def _flatten(L):
-    ret = []
-    for i in L:
-        if isinstance(i,list):
-            ret.extend(_flatten(i))
-        else:
-            ret.append(i)
-    return ret
-
-def matchPreviousLiteral(expr):
-    """
-    Helper to define an expression that is indirectly defined from
-    the tokens matched in a previous expression, that is, it looks
-    for a 'repeat' of a previous expression.  For example::
-        first = Word(nums)
-        second = matchPreviousLiteral(first)
-        matchExpr = first + ":" + second
-    will match C{"1:1"}, but not C{"1:2"}.  Because this matches a
-    previous literal, it will also match the leading C{"1:1"} in C{"1:10"}.
-    If this is not desired, use C{matchPreviousExpr}.
-    Do I{not} use with packrat parsing enabled.
-    """
-    rep = Forward()
-    def copyTokenToRepeater(s,l,t):
-        if t:
-            if len(t) == 1:
-                rep << t[0]
-            else:
-                # flatten t tokens
-                tflat = _flatten(t.asList())
-                rep << And(Literal(tt) for tt in tflat)
-        else:
-            rep << Empty()
-    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
-    rep.setName('(prev) ' + _ustr(expr))
-    return rep
-
-def matchPreviousExpr(expr):
-    """
-    Helper to define an expression that is indirectly defined from
-    the tokens matched in a previous expression, that is, it looks
-    for a 'repeat' of a previous expression.  For example::
-        first = Word(nums)
-        second = matchPreviousExpr(first)
-        matchExpr = first + ":" + second
-    will match C{"1:1"}, but not C{"1:2"}.  Because this matches by
-    expressions, it will I{not} match the leading C{"1:1"} in C{"1:10"};
-    the expressions are evaluated first, and then compared, so
-    C{"1"} is compared with C{"10"}.
-    Do I{not} use with packrat parsing enabled.
-    """
-    rep = Forward()
-    e2 = expr.copy()
-    rep <<= e2
-    def copyTokenToRepeater(s,l,t):
-        matchTokens = _flatten(t.asList())
-        def mustMatchTheseTokens(s,l,t):
-            theseTokens = _flatten(t.asList())
-            if theseTokens != matchTokens:
-                raise ParseException("",0,"")
-        rep.setParseAction( mustMatchTheseTokens, callDuringTry=True )
-    expr.addParseAction(copyTokenToRepeater, callDuringTry=True)
-    rep.setName('(prev) ' + _ustr(expr))
-    return rep
-
-def _escapeRegexRangeChars(s):
-    #~  escape these chars: ^-]
-    for c in r"\^-]":
-        s = s.replace(c,_bslash+c)
-    s = s.replace("\n",r"\n")
-    s = s.replace("\t",r"\t")
-    return _ustr(s)
-
-def oneOf( strs, caseless=False, useRegex=True ):
-    """
-    Helper to quickly define a set of alternative Literals, and makes sure to do
-    longest-first testing when there is a conflict, regardless of the input order,
-    but returns a C{L{MatchFirst}} for best performance.
-
-    Parameters:
-     - strs - a string of space-delimited literals, or a collection of string literals
-     - caseless - (default=C{False}) - treat all literals as caseless
-     - useRegex - (default=C{True}) - as an optimization, will generate a Regex
-          object; otherwise, will generate a C{MatchFirst} object (if C{caseless=True}, or
-          if creating a C{Regex} raises an exception)
-
-    Example::
-        comp_oper = oneOf("< = > <= >= !=")
-        var = Word(alphas)
-        number = Word(nums)
-        term = var | number
-        comparison_expr = term + comp_oper + term
-        print(comparison_expr.searchString("B = 12 AA=23 B<=AA AA>12"))
-    prints::
-        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
-    """
-    if caseless:
-        isequal = ( lambda a,b: a.upper() == b.upper() )
-        masks = ( lambda a,b: b.upper().startswith(a.upper()) )
-        parseElementClass = CaselessLiteral
-    else:
-        isequal = ( lambda a,b: a == b )
-        masks = ( lambda a,b: b.startswith(a) )
-        parseElementClass = Literal
-
-    symbols = []
-    if isinstance(strs,basestring):
-        symbols = strs.split()
-    elif isinstance(strs, collections.Iterable):
-        symbols = list(strs)
-    else:
-        warnings.warn("Invalid argument to oneOf, expected string or iterable",
-                SyntaxWarning, stacklevel=2)
-    if not symbols:
-        return NoMatch()
-
-    i = 0
-    while i < len(symbols)-1:
-        cur = symbols[i]
-        for j,other in enumerate(symbols[i+1:]):
-            if ( isequal(other, cur) ):
-                del symbols[i+j+1]
-                break
-            elif ( masks(cur, other) ):
-                del symbols[i+j+1]
-                symbols.insert(i,other)
-                cur = other
-                break
-        else:
-            i += 1
-
-    if not caseless and useRegex:
-        #~ print (strs,"->", "|".join( [ _escapeRegexChars(sym) for sym in symbols] ))
-        try:
-            if len(symbols)==len("".join(symbols)):
-                return Regex( "[%s]" % "".join(_escapeRegexRangeChars(sym) for sym in symbols) ).setName(' | '.join(symbols))
-            else:
-                return Regex( "|".join(re.escape(sym) for sym in symbols) ).setName(' | '.join(symbols))
-        except Exception:
-            warnings.warn("Exception creating Regex for oneOf, building MatchFirst",
-                    SyntaxWarning, stacklevel=2)
-
-
-    # last resort, just use MatchFirst
-    return MatchFirst(parseElementClass(sym) for sym in symbols).setName(' | '.join(symbols))
-
-def dictOf( key, value ):
-    """
-    Helper to easily and clearly define a dictionary by specifying the respective patterns
-    for the key and value.  Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
Takes care of defining the C{L{Dict}}, C{L{ZeroOrMore}}, and C{L{Group}} tokens
-    in the proper order. The key pattern can include delimiting markers or punctuation,
-    as long as they are suppressed, thereby leaving the significant key text. The value
-    pattern can include named results, so that the C{Dict} results can include named token
-    fields.
-
-    Example::
-        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
-        attr_expr = (label + Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join))
-        print(OneOrMore(attr_expr).parseString(text).dump())
-
-        attr_label = label
-        attr_value = Suppress(':') + OneOrMore(data_word, stopOn=label).setParseAction(' '.join)
-
-        # similar to Dict, but simpler call format
-        result = dictOf(attr_label, attr_value).parseString(text)
-        print(result.dump())
-        print(result['shape'])
-        print(result.shape)  # object attribute access works too
-        print(result.asDict())
-    prints::
-        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
-        - color: light blue
-        - posn: upper left
-        - shape: SQUARE
-        - texture: burlap
-        SQUARE
-        SQUARE
-        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
-    """
-    return Dict( ZeroOrMore( Group ( key + value ) ) )
-
-def originalTextFor(expr, asString=True):
-    """
-    Helper to return the original, untokenized text for a given expression. Useful to
-    restore the parsed fields of an HTML start tag into the raw tag text itself, or to
-    revert separate tokens with intervening whitespace back to the original matching
-    input text. By default, returns a string containing the original parsed text.
-
-    If the optional C{asString} argument is passed as C{False}, then the return value is a
-    C{L{ParseResults}} containing any results names that were originally matched, and a
-    single token containing the original matched text from the input string. So if
-    the expression passed to C{L{originalTextFor}} contains expressions with defined
-    results names, you must set C{asString} to C{False} if you want to preserve those
-    results name values.
-
-    Example::
-        src = "this is test <b> bold <i>text</i> </b> normal text "
-        for tag in ("b","i"):
-            opener,closer = makeHTMLTags(tag)
-            patt = originalTextFor(opener + SkipTo(closer) + closer)
-            print(patt.searchString(src)[0])
-    prints::
-        ['<b> bold <i>text</i> </b>']
-        ['<i>text</i>']
-    """
-    locMarker = Empty().setParseAction(lambda s,loc,t: loc)
-    endlocMarker = locMarker.copy()
-    endlocMarker.callPreparse = False
-    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
-    if asString:
-        extractText = lambda s,l,t: s[t._original_start:t._original_end]
-    else:
-        def extractText(s,l,t):
-            t[:] = [s[t.pop('_original_start'):t.pop('_original_end')]]
-    matchExpr.setParseAction(extractText)
-    matchExpr.ignoreExprs = expr.ignoreExprs
-    return matchExpr
-
-def ungroup(expr):
-    """
-    Helper to undo pyparsing's default grouping of And expressions, even
-    if all but one are non-empty.
-    """
-    return TokenConverter(expr).setParseAction(lambda t:t[0])
-
-def locatedExpr(expr):
-    """
-    Helper to decorate a returned token with its starting and ending locations in the input string.
- This helper adds the following results names: - - locn_start = location where matched expression begins - - locn_end = location where matched expression ends - - value = the actual parsed results - - Be careful if the input text contains C{<TAB>} characters, you may want to call - C{L{ParserElement.parseWithTabs}} - - Example:: - wd = Word(alphas) - for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): - print(match) - prints:: - [[0, 'ljsdf', 5]] - [[8, 'lksdjjf', 15]] - [[18, 'lkkjj', 23]] - """ - locator = Empty().setParseAction(lambda s,l,t: l) - return Group(locator("locn_start") + expr("value") + locator.copy().leaveWhitespace()("locn_end")) - - -# convenience constants for positional expressions -empty = Empty().setName("empty") -lineStart = LineStart().setName("lineStart") -lineEnd = LineEnd().setName("lineEnd") -stringStart = StringStart().setName("stringStart") -stringEnd = StringEnd().setName("stringEnd") - -_escapedPunc = Word( _bslash, r"\[]-*.$+^?()~ ", exact=2 ).setParseAction(lambda s,l,t:t[0][1]) -_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").setParseAction(lambda s,l,t:unichr(int(t[0].lstrip(r'\0x'),16))) -_escapedOctChar = Regex(r"\\0[0-7]+").setParseAction(lambda s,l,t:unichr(int(t[0][1:],8))) -_singleChar = _escapedPunc | _escapedHexChar | _escapedOctChar | Word(printables, excludeChars=r'\]', exact=1) | Regex(r"\w", re.UNICODE) -_charRange = Group(_singleChar + Suppress("-") + _singleChar) -_reBracketExpr = Literal("[") + Optional("^").setResultsName("negate") + Group( OneOrMore( _charRange | _singleChar ) ).setResultsName("body") + "]" - -def srange(s): - r""" - Helper to easily define string ranges for use in Word construction. Borrows - syntax from regexp '[]' string range definitions:: - srange("[0-9]") -> "0123456789" - srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz" - srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_" - The input string must be enclosed in []'s, and the returned string is the expanded - character set joined into a single string. - The values enclosed in the []'s may be: - - a single character - - an escaped character with a leading backslash (such as C{\-} or C{\]}) - - an escaped hex character with a leading C{'\x'} (C{\x21}, which is a C{'!'} character) - (C{\0x##} is also supported for backwards compatibility) - - an escaped octal character with a leading C{'\0'} (C{\041}, which is a C{'!'} character) - - a range of any of the above, separated by a dash (C{'a-z'}, etc.) - - any combination of the above (C{'aeiouy'}, C{'a-zA-Z0-9_$'}, etc.) - """ - _expanded = lambda p: p if not isinstance(p,ParseResults) else ''.join(unichr(c) for c in range(ord(p[0]),ord(p[1])+1)) - try: - return "".join(_expanded(part) for part in _reBracketExpr.parseString(s).body) - except Exception: - return "" - -def matchOnlyAtCol(n): - """ - Helper method for defining parse actions that require matching at a specific - column in the input text. - """ - def verifyCol(strg,locn,toks): - if col(locn,strg) != n: - raise ParseException(strg,locn,"matched token not at column %d" % n) - return verifyCol - -def replaceWith(replStr): - """ - Helper method for common parse actions that simply return a literal value. Especially - useful when used with C{L{transformString<ParserElement.transformString>}()}. 
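A minimal sketch of the transformString() pairing mentioned just above, using a
variant of the "na" expression from the example that follows (the replacement
token "<unk>" is an arbitrary stand-in chosen for this illustration):

    na = oneOf("N/A NA").setParseAction(replaceWith("<unk>"))
    na.transformString("324 N/A 234 NA")    # -> '324 <unk> 234 <unk>'
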
-
-    Example::
-        num = Word(nums).setParseAction(lambda toks: int(toks[0]))
-        na = oneOf("N/A NA").setParseAction(replaceWith(math.nan))
-        term = na | num
-
-        OneOrMore(term).parseString("324 234 N/A 234") # -> [324, 234, nan, 234]
-    """
-    return lambda s,l,t: [replStr]
-
-def removeQuotes(s,l,t):
-    """
-    Helper parse action for removing quotation marks from parsed quoted strings.
-
-    Example::
-        # by default, quotation marks are included in parsed results
-        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
-
-        # use removeQuotes to strip quotation marks from parsed results
-        quotedString.setParseAction(removeQuotes)
-        quotedString.parseString("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
-    """
-    return t[0][1:-1]
-
-def tokenMap(func, *args):
-    """
-    Helper to define a parse action by mapping a function to all elements of a ParseResults list. If any additional
-    args are passed, they are forwarded to the given function as additional arguments after
-    the token, as in C{hex_integer = Word(hexnums).setParseAction(tokenMap(int, 16))}, which will convert the
-    parsed data to an integer using base 16.
-
-    Example (compare the last example to the one in L{ParserElement.transformString})::
-        hex_ints = OneOrMore(Word(hexnums)).setParseAction(tokenMap(int, 16))
-        hex_ints.runTests('''
-            00 11 22 aa FF 0a 0d 1a
-            ''')
-
-        upperword = Word(alphas).setParseAction(tokenMap(str.upper))
-        OneOrMore(upperword).runTests('''
-            my kingdom for a horse
-            ''')
-
-        wd = Word(alphas).setParseAction(tokenMap(str.title))
-        OneOrMore(wd).setParseAction(' '.join).runTests('''
-            now is the winter of our discontent made glorious summer by this sun of york
-            ''')
-    prints::
-        00 11 22 aa FF 0a 0d 1a
-        [0, 17, 34, 170, 255, 10, 13, 26]
-
-        my kingdom for a horse
-        ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE']
-
-        now is the winter of our discontent made glorious summer by this sun of york
-        ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York']
-    """
-    def pa(s,l,t):
-        return [func(tokn, *args) for tokn in t]
-
-    try:
-        func_name = getattr(func, '__name__',
-                            getattr(func, '__class__').__name__)
-    except Exception:
-        func_name = str(func)
-    pa.__name__ = func_name
-
-    return pa
-
-upcaseTokens = tokenMap(lambda t: _ustr(t).upper())
-"""(Deprecated) Helper parse action to convert tokens to upper case. Deprecated in favor of L{pyparsing_common.upcaseTokens}"""
-
-downcaseTokens = tokenMap(lambda t: _ustr(t).lower())
-"""(Deprecated) Helper parse action to convert tokens to lower case. Deprecated in favor of L{pyparsing_common.downcaseTokens}"""
-
-def _makeTags(tagStr, xml):
-    """Internal helper to construct opening and closing tag expressions, given a tag name"""
-    if isinstance(tagStr,basestring):
-        resname = tagStr
-        tagStr = Keyword(tagStr, caseless=not xml)
-    else:
-        resname = tagStr.name
-
-    tagAttrName = Word(alphas,alphanums+"_-:")
-    if (xml):
-        tagAttrValue = dblQuotedString.copy().setParseAction( removeQuotes )
-        openTag = Suppress("<") + tagStr("tag") + \
-                Dict(ZeroOrMore(Group( tagAttrName + Suppress("=") + tagAttrValue ))) + \
-                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
-    else:
-        printablesLessRAbrack = "".join(c for c in printables if c not in ">")
-        tagAttrValue = quotedString.copy().setParseAction( removeQuotes ) | Word(printablesLessRAbrack)
-        openTag = Suppress("<") + tagStr("tag") + \
-                Dict(ZeroOrMore(Group( tagAttrName.setParseAction(downcaseTokens) + \
-                Optional( Suppress("=") + tagAttrValue ) ))) + \
-                Optional("/",default=[False]).setResultsName("empty").setParseAction(lambda s,l,t:t[0]=='/') + Suppress(">")
-    closeTag = Combine(_L("</") + tagStr + ">")
-
-    openTag = openTag.setResultsName("start"+"".join(resname.replace(":"," ").title().split())).setName("<%s>" % resname)
-    closeTag = closeTag.setResultsName("end"+"".join(resname.replace(":"," ").title().split())).setName("</%s>" % resname)
-    openTag.tag = resname
-    closeTag.tag = resname
-    return openTag, closeTag
-
-def makeHTMLTags(tagStr):
-    """
-    Helper to construct opening and closing tag expressions for HTML, given a tag name. Matches
-    tags in either upper or lower case, attributes with namespaces and with quoted or unquoted values.
-
-    Example::
-        text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>'
-        # makeHTMLTags returns pyparsing expressions for the opening and closing tags as a 2-tuple
-        a,a_end = makeHTMLTags("A")
-        link_expr = a + SkipTo(a_end)("link_text") + a_end
-
-        for link in link_expr.searchString(text):
-            # attributes in the <A> tag (like "href" shown here) are also accessible as named results
-            print(link.link_text, '->', link.href)
-    prints::
-        pyparsing -> http://pyparsing.wikispaces.com
-    """
-    return _makeTags( tagStr, False )
-
-def makeXMLTags(tagStr):
-    """
-    Helper to construct opening and closing tag expressions for XML, given a tag name. Matches
-    tags only in the given upper/lower case.
-
-    Example: similar to L{makeHTMLTags}
-    """
-    return _makeTags( tagStr, True )
-
-def withAttribute(*args,**attrDict):
-    """
-    Helper to create a validating parse action to be used with start tags created
-    with C{L{makeXMLTags}} or C{L{makeHTMLTags}}. Use C{withAttribute} to qualify a starting tag
-    with a required attribute value, to avoid false matches on common tags such as
-    C{<TD>} or C{<DIV>}.
-
-    Call C{withAttribute} with a series of attribute names and values. Specify the list
-    of filter attribute names and values as:
-     - keyword arguments, as in C{(align="right")}, or
-     - as an explicit dict with C{**} operator, when an attribute name is also a Python
-       reserved word, as in C{**{"class":"Customer", "align":"right"}}
-     - a list of name-value tuples, as in ( ("ns1:class", "Customer"), ("ns2:align","right") )
-    For attribute names with a namespace prefix, you must use the second form. Attribute
-    names are matched insensitive to upper/lower case.
-
-    If just testing for C{class} (with or without a namespace), use C{L{withClass}}.
- - To verify that the attribute exists, but without specifying a value, pass - C{withAttribute.ANY_VALUE} as the value. - - Example:: - html = ''' - <div> - Some text - <div type="grid">1 4 0 1 0</div> - <div type="graph">1,3 2,3 1,1</div> - <div>this has no type</div> - </div> - - ''' - div,div_end = makeHTMLTags("div") - - # only match div tag having a type attribute with value "grid" - div_grid = div().setParseAction(withAttribute(type="grid")) - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.searchString(html): - print(grid_header.body) - - # construct a match with any div tag having a type attribute, regardless of the value - div_any_type = div().setParseAction(withAttribute(type=withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.searchString(html): - print(div_header.body) - prints:: - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - if args: - attrs = args[:] - else: - attrs = attrDict.items() - attrs = [(k,v) for k,v in attrs] - def pa(s,l,tokens): - for attrName,attrValue in attrs: - if attrName not in tokens: - raise ParseException(s,l,"no matching attribute " + attrName) - if attrValue != withAttribute.ANY_VALUE and tokens[attrName] != attrValue: - raise ParseException(s,l,"attribute '%s' has value '%s', must be '%s'" % - (attrName, tokens[attrName], attrValue)) - return pa -withAttribute.ANY_VALUE = object() - -def withClass(classname, namespace=''): - """ - Simplified version of C{L{withAttribute}} when matching on a div class - made - difficult because C{class} is a reserved word in Python. - - Example:: - html = ''' - <div> - Some text - <div class="grid">1 4 0 1 0</div> - <div class="graph">1,3 2,3 1,1</div> - <div>this <div> has no class</div> - </div> - - ''' - div,div_end = makeHTMLTags("div") - div_grid = div().setParseAction(withClass("grid")) - - grid_expr = div_grid + SkipTo(div | div_end)("body") - for grid_header in grid_expr.searchString(html): - print(grid_header.body) - - div_any_type = div().setParseAction(withClass(withAttribute.ANY_VALUE)) - div_expr = div_any_type + SkipTo(div | div_end)("body") - for div_header in div_expr.searchString(html): - print(div_header.body) - prints:: - 1 4 0 1 0 - - 1 4 0 1 0 - 1,3 2,3 1,1 - """ - classattr = "%s:class" % namespace if namespace else "class" - return withAttribute(**{classattr : classname}) - -opAssoc = _Constants() -opAssoc.LEFT = object() -opAssoc.RIGHT = object() - -def infixNotation( baseExpr, opList, lpar=Suppress('('), rpar=Suppress(')') ): - """ - Helper method for constructing grammars of expressions made up of - operators working in a precedence hierarchy. Operators may be unary or - binary, left- or right-associative. Parse actions can also be attached - to operator expressions. The generated parser will also recognize the use - of parentheses to override operator precedences (see example below). - - Note: if you define a deep operator list, you may see performance issues - when using infixNotation. See L{ParserElement.enablePackrat} for a - mechanism to potentially improve your parser performance. 
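A minimal sketch of that hookup, reusing the arith_expr grammar from the
example further below (enablePackrat applies globally and should be called
once, before parsing):

    ParserElement.enablePackrat()
    arith_expr.parseString("9 + 2 * 3")   # repeated backtracking now hits a memo cache
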
- - Parameters: - - baseExpr - expression representing the most basic element for the nested - - opList - list of tuples, one for each operator precedence level in the - expression grammar; each tuple is of the form - (opExpr, numTerms, rightLeftAssoc, parseAction), where: - - opExpr is the pyparsing expression for the operator; - may also be a string, which will be converted to a Literal; - if numTerms is 3, opExpr is a tuple of two expressions, for the - two operators separating the 3 terms - - numTerms is the number of terms for this operator (must - be 1, 2, or 3) - - rightLeftAssoc is the indicator whether the operator is - right or left associative, using the pyparsing-defined - constants C{opAssoc.RIGHT} and C{opAssoc.LEFT}. - - parseAction is the parse action to be associated with - expressions matching this operator expression (the - parse action tuple member may be omitted) - - lpar - expression for matching left-parentheses (default=C{Suppress('(')}) - - rpar - expression for matching right-parentheses (default=C{Suppress(')')}) - - Example:: - # simple example of four-function arithmetic with ints and variable names - integer = pyparsing_common.signed_integer - varname = pyparsing_common.identifier - - arith_expr = infixNotation(integer | varname, - [ - ('-', 1, opAssoc.RIGHT), - (oneOf('* /'), 2, opAssoc.LEFT), - (oneOf('+ -'), 2, opAssoc.LEFT), - ]) - - arith_expr.runTests(''' - 5+3*6 - (5+3)*6 - -2--11 - ''', fullDump=False) - prints:: - 5+3*6 - [[5, '+', [3, '*', 6]]] - - (5+3)*6 - [[[5, '+', 3], '*', 6]] - - -2--11 - [[['-', 2], '-', ['-', 11]]] - """ - ret = Forward() - lastExpr = baseExpr | ( lpar + ret + rpar ) - for i,operDef in enumerate(opList): - opExpr,arity,rightLeftAssoc,pa = (operDef + (None,))[:4] - termName = "%s term" % opExpr if arity < 3 else "%s%s term" % opExpr - if arity == 3: - if opExpr is None or len(opExpr) != 2: - raise ValueError("if numterms=3, opExpr must be a tuple or list of two expressions") - opExpr1, opExpr2 = opExpr - thisExpr = Forward().setName(termName) - if rightLeftAssoc == opAssoc.LEFT: - if arity == 1: - matchExpr = FollowedBy(lastExpr + opExpr) + Group( lastExpr + OneOrMore( opExpr ) ) - elif arity == 2: - if opExpr is not None: - matchExpr = FollowedBy(lastExpr + opExpr + lastExpr) + Group( lastExpr + OneOrMore( opExpr + lastExpr ) ) - else: - matchExpr = FollowedBy(lastExpr+lastExpr) + Group( lastExpr + OneOrMore(lastExpr) ) - elif arity == 3: - matchExpr = FollowedBy(lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr) + \ - Group( lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr ) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - elif rightLeftAssoc == opAssoc.RIGHT: - if arity == 1: - # try to avoid LR with this extra test - if not isinstance(opExpr, Optional): - opExpr = Optional(opExpr) - matchExpr = FollowedBy(opExpr.expr + thisExpr) + Group( opExpr + thisExpr ) - elif arity == 2: - if opExpr is not None: - matchExpr = FollowedBy(lastExpr + opExpr + thisExpr) + Group( lastExpr + OneOrMore( opExpr + thisExpr ) ) - else: - matchExpr = FollowedBy(lastExpr + thisExpr) + Group( lastExpr + OneOrMore( thisExpr ) ) - elif arity == 3: - matchExpr = FollowedBy(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + \ - Group( lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr ) - else: - raise ValueError("operator must be unary (1), binary (2), or ternary (3)") - else: - raise ValueError("operator must indicate right or left associativity") - if pa: - matchExpr.setParseAction( pa ) - thisExpr <<= ( 
matchExpr.setName(termName) | lastExpr ) - lastExpr = thisExpr - ret <<= lastExpr - return ret - -operatorPrecedence = infixNotation -"""(Deprecated) Former name of C{L{infixNotation}}, will be dropped in a future release.""" - -dblQuotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"').setName("string enclosed in double quotes") -sglQuotedString = Combine(Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("string enclosed in single quotes") -quotedString = Combine(Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*')+'"'| - Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*")+"'").setName("quotedString using single or double quotes") -unicodeString = Combine(_L('u') + quotedString.copy()).setName("unicode string literal") - -def nestedExpr(opener="(", closer=")", content=None, ignoreExpr=quotedString.copy()): - """ - Helper method for defining nested lists enclosed in opening and closing - delimiters ("(" and ")" are the default). - - Parameters: - - opener - opening character for a nested list (default=C{"("}); can also be a pyparsing expression - - closer - closing character for a nested list (default=C{")"}); can also be a pyparsing expression - - content - expression for items within the nested lists (default=C{None}) - - ignoreExpr - expression for ignoring opening and closing delimiters (default=C{quotedString}) - - If an expression is not provided for the content argument, the nested - expression will capture all whitespace-delimited content between delimiters - as a list of separate values. - - Use the C{ignoreExpr} argument to define expressions that may contain - opening or closing characters that should not be treated as opening - or closing characters for nesting, such as quotedString or a comment - expression. Specify multiple expressions using an C{L{Or}} or C{L{MatchFirst}}. - The default is L{quotedString}, but if no expressions are to be ignored, - then pass C{None} for this argument. 
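For a quick sense of the default behavior ahead of the fuller example below
(whitespace-delimited items, one nested sublist per level of parentheses):

    nestedExpr().parseString("(a (b c) d)")   # -> [['a', ['b', 'c'], 'd']]
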
-
-    Example::
-        data_type = oneOf("void int short long char float double")
-        decl_data_type = Combine(data_type + Optional(Word('*')))
-        ident = Word(alphas+'_', alphanums+'_')
-        number = pyparsing_common.number
-        arg = Group(decl_data_type + ident)
-        LPAR,RPAR = map(Suppress, "()")
-
-        code_body = nestedExpr('{', '}', ignoreExpr=(quotedString | cStyleComment))
-
-        c_function = (decl_data_type("type")
-                      + ident("name")
-                      + LPAR + Optional(delimitedList(arg), [])("args") + RPAR
-                      + code_body("body"))
-        c_function.ignore(cStyleComment)
-
-        source_code = '''
-            int is_odd(int x) {
-                return (x%2);
-            }
-
-            int dec_to_hex(char hchar) {
-                if (hchar >= '0' && hchar <= '9') {
-                    return (ord(hchar)-ord('0'));
-                } else {
-                    return (10+ord(hchar)-ord('A'));
-                }
-            }
-        '''
-        for func in c_function.searchString(source_code):
-            print("%(name)s (%(type)s) args: %(args)s" % func)
-
-    prints::
-        is_odd (int) args: [['int', 'x']]
-        dec_to_hex (int) args: [['char', 'hchar']]
-    """
-    if opener == closer:
-        raise ValueError("opening and closing strings cannot be the same")
-    if content is None:
-        if isinstance(opener,basestring) and isinstance(closer,basestring):
-            if len(opener) == 1 and len(closer)==1:
-                if ignoreExpr is not None:
-                    content = (Combine(OneOrMore(~ignoreExpr +
-                                    CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS,exact=1))
-                                ).setParseAction(lambda t:t[0].strip()))
-                else:
-                    content = (empty.copy()+CharsNotIn(opener+closer+ParserElement.DEFAULT_WHITE_CHARS
-                                ).setParseAction(lambda t:t[0].strip()))
-            else:
-                if ignoreExpr is not None:
-                    content = (Combine(OneOrMore(~ignoreExpr +
-                                    ~Literal(opener) + ~Literal(closer) +
-                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
-                                ).setParseAction(lambda t:t[0].strip()))
-                else:
-                    content = (Combine(OneOrMore(~Literal(opener) + ~Literal(closer) +
-                                    CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS,exact=1))
-                                ).setParseAction(lambda t:t[0].strip()))
-        else:
-            raise ValueError("opening and closing arguments must be strings if no content expression is given")
-    ret = Forward()
-    if ignoreExpr is not None:
-        ret <<= Group( Suppress(opener) + ZeroOrMore( ignoreExpr | ret | content ) + Suppress(closer) )
-    else:
-        ret <<= Group( Suppress(opener) + ZeroOrMore( ret | content ) + Suppress(closer) )
-    ret.setName('nested %s%s expression' % (opener,closer))
-    return ret
-
-def indentedBlock(blockStatementExpr, indentStack, indent=True):
-    """
-    Helper method for defining space-delimited indentation blocks, such as
-    those used to define block statements in Python source code.
-
-    Parameters:
-     - blockStatementExpr - expression defining syntax of statement that
-       is repeated within the indented block
-     - indentStack - list created by caller to manage indentation stack
-       (multiple statementWithIndentedBlock expressions within a single grammar
-       should share a common indentStack)
-     - indent - boolean indicating whether block must be indented beyond
-       the current level; set to False for block of left-most statements
-       (default=C{True})
-
-    A valid block must contain at least one C{blockStatement}.
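One practical note, visible in the implementation below: parsing pushes and
pops entries on the caller's indentStack, so a sketch of reusing one grammar
on fresh input might reset the stack first (stmt as in the example that
follows):

    indentStack = [1]
    block = indentedBlock(stmt, indentStack)
    # ... after a parse completes, before parsing another source string:
    indentStack[:] = [1]
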
- - Example:: - data = ''' - def A(z): - A1 - B = 100 - G = A2 - A2 - A3 - B - def BB(a,b,c): - BB1 - def BBA(): - bba1 - bba2 - bba3 - C - D - def spam(x,y): - def eggs(z): - pass - ''' - - - indentStack = [1] - stmt = Forward() - - identifier = Word(alphas, alphanums) - funcDecl = ("def" + identifier + Group( "(" + Optional( delimitedList(identifier) ) + ")" ) + ":") - func_body = indentedBlock(stmt, indentStack) - funcDef = Group( funcDecl + func_body ) - - rvalue = Forward() - funcCall = Group(identifier + "(" + Optional(delimitedList(rvalue)) + ")") - rvalue << (funcCall | identifier | Word(nums)) - assignment = Group(identifier + "=" + rvalue) - stmt << ( funcDef | assignment | identifier ) - - module_body = OneOrMore(stmt) - - parseTree = module_body.parseString(data) - parseTree.pprint() - prints:: - [['def', - 'A', - ['(', 'z', ')'], - ':', - [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]], - 'B', - ['def', - 'BB', - ['(', 'a', 'b', 'c', ')'], - ':', - [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]], - 'C', - 'D', - ['def', - 'spam', - ['(', 'x', 'y', ')'], - ':', - [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]] - """ - def checkPeerIndent(s,l,t): - if l >= len(s): return - curCol = col(l,s) - if curCol != indentStack[-1]: - if curCol > indentStack[-1]: - raise ParseFatalException(s,l,"illegal nesting") - raise ParseException(s,l,"not a peer entry") - - def checkSubIndent(s,l,t): - curCol = col(l,s) - if curCol > indentStack[-1]: - indentStack.append( curCol ) - else: - raise ParseException(s,l,"not a subentry") - - def checkUnindent(s,l,t): - if l >= len(s): return - curCol = col(l,s) - if not(indentStack and curCol < indentStack[-1] and curCol <= indentStack[-2]): - raise ParseException(s,l,"not an unindent") - indentStack.pop() - - NL = OneOrMore(LineEnd().setWhitespaceChars("\t ").suppress()) - INDENT = (Empty() + Empty().setParseAction(checkSubIndent)).setName('INDENT') - PEER = Empty().setParseAction(checkPeerIndent).setName('') - UNDENT = Empty().setParseAction(checkUnindent).setName('UNINDENT') - if indent: - smExpr = Group( Optional(NL) + - #~ FollowedBy(blockStatementExpr) + - INDENT + (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) + UNDENT) - else: - smExpr = Group( Optional(NL) + - (OneOrMore( PEER + Group(blockStatementExpr) + Optional(NL) )) ) - blockStatementExpr.ignore(_bslash + LineEnd()) - return smExpr.setName('indented block') - -alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") -punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") - -anyOpenTag,anyCloseTag = makeHTMLTags(Word(alphas,alphanums+"_:").setName('any tag')) -_htmlEntityMap = dict(zip("gt lt amp nbsp quot apos".split(),'><& "\'')) -commonHTMLEntity = Regex('&(?P<entity>' + '|'.join(_htmlEntityMap.keys()) +");").setName("common HTML entity") -def replaceHTMLEntity(t): - """Helper parser action to replace common HTML entities with their special characters""" - return _htmlEntityMap.get(t.entity) - -# it's easy to get these comment structures wrong - they're very common, so may as well make them available -cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") -"Comment of the form C{/* ... */}" - -htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment") -"Comment of the form C{<!-- ... -->}" - -restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") -dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") -"Comment of the form C{// ... 
(to end of line)}" - -cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/'| dblSlashComment).setName("C++ style comment") -"Comment of either form C{L{cStyleComment}} or C{L{dblSlashComment}}" - -javaStyleComment = cppStyleComment -"Same as C{L{cppStyleComment}}" - -pythonStyleComment = Regex(r"#.*").setName("Python style comment") -"Comment of the form C{# ... (to end of line)}" - -_commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + - Optional( Word(" \t") + - ~Literal(",") + ~LineEnd() ) ) ).streamline().setName("commaItem") -commaSeparatedList = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("commaSeparatedList") -"""(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. - This expression is deprecated in favor of L{pyparsing_common.comma_separated_list}.""" - -# some other useful expressions - using lower-case class name since we are really using this as a namespace -class pyparsing_common: - """ - Here are some common low-level expressions that may be useful in jump-starting parser development: - - numeric forms (L{integers<integer>}, L{reals<real>}, L{scientific notation<sci_real>}) - - common L{programming identifiers<identifier>} - - network addresses (L{MAC<mac_address>}, L{IPv4<ipv4_address>}, L{IPv6<ipv6_address>}) - - ISO8601 L{dates<iso8601_date>} and L{datetime<iso8601_datetime>} - - L{UUID<uuid>} - - L{comma-separated list<comma_separated_list>} - Parse actions: - - C{L{convertToInteger}} - - C{L{convertToFloat}} - - C{L{convertToDate}} - - C{L{convertToDatetime}} - - C{L{stripHTMLTags}} - - C{L{upcaseTokens}} - - C{L{downcaseTokens}} - - Example:: - pyparsing_common.number.runTests(''' - # any int or real number, returned as the appropriate type - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.fnumber.runTests(''' - # any int or real number, returned as float - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - ''') - - pyparsing_common.hex_integer.runTests(''' - # hex numbers - 100 - FF - ''') - - pyparsing_common.fraction.runTests(''' - # fractions - 1/2 - -3/4 - ''') - - pyparsing_common.mixed_integer.runTests(''' - # mixed fractions - 1 - 1/2 - -3/4 - 1-3/4 - ''') - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(''' - # uuid - 12345678-1234-5678-1234-567812345678 - ''') - prints:: - # any int or real number, returned as the appropriate type - 100 - [100] - - -100 - [-100] - - +100 - [100] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # any int or real number, returned as float - 100 - [100.0] - - -100 - [-100.0] - - +100 - [100.0] - - 3.14159 - [3.14159] - - 6.02e23 - [6.02e+23] - - 1e-12 - [1e-12] - - # hex numbers - 100 - [256] - - FF - [255] - - # fractions - 1/2 - [0.5] - - -3/4 - [-0.75] - - # mixed fractions - 1 - [1] - - 1/2 - [0.5] - - -3/4 - [-0.75] - - 1-3/4 - [1.75] - - # uuid - 12345678-1234-5678-1234-567812345678 - [UUID('12345678-1234-5678-1234-567812345678')] - """ - - convertToInteger = tokenMap(int) - """ - Parse action for converting parsed integers to Python int - """ - - convertToFloat = tokenMap(float) - """ - Parse action for converting parsed numbers to Python float - """ - - integer = Word(nums).setName("integer").setParseAction(convertToInteger) - """expression that parses an unsigned integer, returns an int""" - - hex_integer = Word(hexnums).setName("hex integer").setParseAction(tokenMap(int,16)) - """expression that 
parses a hexadecimal integer, returns an int""" - - signed_integer = Regex(r'[+-]?\d+').setName("signed integer").setParseAction(convertToInteger) - """expression that parses an integer with optional leading sign, returns an int""" - - fraction = (signed_integer().setParseAction(convertToFloat) + '/' + signed_integer().setParseAction(convertToFloat)).setName("fraction") - """fractional expression of an integer divided by an integer, returns a float""" - fraction.addParseAction(lambda t: t[0]/t[-1]) - - mixed_integer = (fraction | signed_integer + Optional(Optional('-').suppress() + fraction)).setName("fraction or mixed integer-fraction") - """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" - mixed_integer.addParseAction(sum) - - real = Regex(r'[+-]?\d+\.\d*').setName("real number").setParseAction(convertToFloat) - """expression that parses a floating point number and returns a float""" - - sci_real = Regex(r'[+-]?\d+([eE][+-]?\d+|\.\d*([eE][+-]?\d+)?)').setName("real number with scientific notation").setParseAction(convertToFloat) - """expression that parses a floating point number with optional scientific notation and returns a float""" - - # streamlining this expression makes the docs nicer-looking - number = (sci_real | real | signed_integer).streamline() - """any numeric expression, returns the corresponding Python type""" - - fnumber = Regex(r'[+-]?\d+\.?\d*([eE][+-]?\d+)?').setName("fnumber").setParseAction(convertToFloat) - """any int or real number, returned as float""" - - identifier = Word(alphas+'_', alphanums+'_').setName("identifier") - """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" - - ipv4_address = Regex(r'(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}').setName("IPv4 address") - "IPv4 address (C{0.0.0.0 - 255.255.255.255})" - - _ipv6_part = Regex(r'[0-9a-fA-F]{1,4}').setName("hex_integer") - _full_ipv6_address = (_ipv6_part + (':' + _ipv6_part)*7).setName("full IPv6 address") - _short_ipv6_address = (Optional(_ipv6_part + (':' + _ipv6_part)*(0,6)) + "::" + Optional(_ipv6_part + (':' + _ipv6_part)*(0,6))).setName("short IPv6 address") - _short_ipv6_address.addCondition(lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8) - _mixed_ipv6_address = ("::ffff:" + ipv4_address).setName("mixed IPv6 address") - ipv6_address = Combine((_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).setName("IPv6 address")).setName("IPv6 address") - "IPv6 address (long, short, or mixed form)" - - mac_address = Regex(r'[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}').setName("MAC address") - "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" - - @staticmethod - def convertToDate(fmt="%Y-%m-%d"): - """ - Helper to create a parse action for converting parsed date string to Python datetime.date - - Params - - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%d"}) - - Example:: - date_expr = pyparsing_common.iso8601_date.copy() - date_expr.setParseAction(pyparsing_common.convertToDate()) - print(date_expr.parseString("1999-12-31")) - prints:: - [datetime.date(1999, 12, 31)] - """ - def cvt_fn(s,l,t): - try: - return datetime.strptime(t[0], fmt).date() - except ValueError as ve: - raise ParseException(s, l, str(ve)) - return cvt_fn - - @staticmethod - def convertToDatetime(fmt="%Y-%m-%dT%H:%M:%S.%f"): - """ - Helper to create a parse action for converting parsed datetime string to Python datetime.datetime - - Params - - - fmt - format to be passed to datetime.strptime (default=C{"%Y-%m-%dT%H:%M:%S.%f"}) - - Example:: - dt_expr = pyparsing_common.iso8601_datetime.copy() - dt_expr.setParseAction(pyparsing_common.convertToDatetime()) - print(dt_expr.parseString("1999-12-31T23:59:59.999")) - prints:: - [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] - """ - def cvt_fn(s,l,t): - try: - return datetime.strptime(t[0], fmt) - except ValueError as ve: - raise ParseException(s, l, str(ve)) - return cvt_fn - - iso8601_date = Regex(r'(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?').setName("ISO8601 date") - "ISO8601 date (C{yyyy-mm-dd})" - - iso8601_datetime = Regex(r'(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?').setName("ISO8601 datetime") - "ISO8601 datetime (C{yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)}) - trailing seconds, milliseconds, and timezone optional; accepts separating C{'T'} or C{' '}" - - uuid = Regex(r'[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}').setName("UUID") - "UUID (C{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx})" - - _html_stripper = anyOpenTag.suppress() | anyCloseTag.suppress() - @staticmethod - def stripHTMLTags(s, l, tokens): - """ - Parse action to remove HTML tags from web page HTML source - - Example:: - # strip HTML links from normal text - text = '<td>More info at the <a href="http://pyparsing.wikispaces.com">pyparsing</a> wiki page</td>' - td,td_end = makeHTMLTags("TD") - table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end - - print(table_text.parseString(text).body) # -> 'More info at the pyparsing wiki page' - """ - return pyparsing_common._html_stripper.transformString(tokens[0]) - - _commasepitem = Combine(OneOrMore(~Literal(",") + ~LineEnd() + Word(printables, excludeChars=',') - + Optional( White(" \t") ) ) ).streamline().setName("commaItem") - comma_separated_list = delimitedList( Optional( quotedString.copy() | _commasepitem, default="") ).setName("comma separated list") - """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" - - upcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).upper())) - """Parse action to convert tokens to upper case.""" - - downcaseTokens = staticmethod(tokenMap(lambda t: _ustr(t).lower())) - """Parse action to convert tokens to lower case.""" - - -if __name__ == "__main__": - - selectToken = CaselessLiteral("select") - fromToken = CaselessLiteral("from") - - ident = Word(alphas, alphanums + "_$") - - columnName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) - columnNameList = Group(delimitedList(columnName)).setName("columns") - columnSpec 
= ('*' | columnNameList) - - tableName = delimitedList(ident, ".", combine=True).setParseAction(upcaseTokens) - tableNameList = Group(delimitedList(tableName)).setName("tables") - - simpleSQL = selectToken("command") + columnSpec("columns") + fromToken + tableNameList("tables") - - # demo runTests method, including embedded comments in test string - simpleSQL.runTests(""" - # '*' as column list and dotted table name - select * from SYS.XYZZY - - # caseless match on "SELECT", and casts back to "select" - SELECT * from XYZZY, ABC - - # list of column names, and mixed case SELECT keyword - Select AA,BB,CC from Sys.dual - - # multiple tables - Select A, B, C from Sys.dual, Table2 - - # invalid SELECT keyword - should fail - Xelect A, B, C from Sys.dual - - # incomplete command - should fail - Select - - # invalid column name - should fail - Select ^^^ frox Sys.dual - - """) - - pyparsing_common.number.runTests(""" - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - """) - - # any int or real number, returned as float - pyparsing_common.fnumber.runTests(""" - 100 - -100 - +100 - 3.14159 - 6.02e23 - 1e-12 - """) - - pyparsing_common.hex_integer.runTests(""" - 100 - FF - """) - - import uuid - pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) - pyparsing_common.uuid.runTests(""" - 12345678-1234-5678-1234-567812345678 - """) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/six.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/six.py deleted file mode 100644 index 190c023..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/_vendor/six.py +++ /dev/null @@ -1,868 +0,0 @@ -"""Utilities for writing code that runs on Python 2 and 3""" - -# Copyright (c) 2010-2015 Benjamin Peterson -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in all -# copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -# SOFTWARE. - -from __future__ import absolute_import - -import functools -import itertools -import operator -import sys -import types - -__author__ = "Benjamin Peterson <benjamin@python.org>" -__version__ = "1.10.0" - - -# Useful for very coarse version differentiation. 
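# A hedged usage sketch (an editorial aside, not from the deleted file):
# callers typically branch on the flags defined just below to paper over
# Python 2/3 API moves, e.g.
#
#     import six
#     if six.PY2:
#         from urllib2 import urlopen
#     else:
#         from urllib.request import urlopen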
-PY2 = sys.version_info[0] == 2 -PY3 = sys.version_info[0] == 3 -PY34 = sys.version_info[0:2] >= (3, 4) - -if PY3: - string_types = str, - integer_types = int, - class_types = type, - text_type = str - binary_type = bytes - - MAXSIZE = sys.maxsize -else: - string_types = basestring, - integer_types = (int, long) - class_types = (type, types.ClassType) - text_type = unicode - binary_type = str - - if sys.platform.startswith("java"): - # Jython always uses 32 bits. - MAXSIZE = int((1 << 31) - 1) - else: - # It's possible to have sizeof(long) != sizeof(Py_ssize_t). - class X(object): - - def __len__(self): - return 1 << 31 - try: - len(X()) - except OverflowError: - # 32-bit - MAXSIZE = int((1 << 31) - 1) - else: - # 64-bit - MAXSIZE = int((1 << 63) - 1) - del X - - -def _add_doc(func, doc): - """Add documentation to a function.""" - func.__doc__ = doc - - -def _import_module(name): - """Import module, returning the module after the last dot.""" - __import__(name) - return sys.modules[name] - - -class _LazyDescr(object): - - def __init__(self, name): - self.name = name - - def __get__(self, obj, tp): - result = self._resolve() - setattr(obj, self.name, result) # Invokes __set__. - try: - # This is a bit ugly, but it avoids running this again by - # removing this descriptor. - delattr(obj.__class__, self.name) - except AttributeError: - pass - return result - - -class MovedModule(_LazyDescr): - - def __init__(self, name, old, new=None): - super(MovedModule, self).__init__(name) - if PY3: - if new is None: - new = name - self.mod = new - else: - self.mod = old - - def _resolve(self): - return _import_module(self.mod) - - def __getattr__(self, attr): - _module = self._resolve() - value = getattr(_module, attr) - setattr(self, attr, value) - return value - - -class _LazyModule(types.ModuleType): - - def __init__(self, name): - super(_LazyModule, self).__init__(name) - self.__doc__ = self.__class__.__doc__ - - def __dir__(self): - attrs = ["__doc__", "__name__"] - attrs += [attr.name for attr in self._moved_attributes] - return attrs - - # Subclasses should override this - _moved_attributes = [] - - -class MovedAttribute(_LazyDescr): - - def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): - super(MovedAttribute, self).__init__(name) - if PY3: - if new_mod is None: - new_mod = name - self.mod = new_mod - if new_attr is None: - if old_attr is None: - new_attr = name - else: - new_attr = old_attr - self.attr = new_attr - else: - self.mod = old_mod - if old_attr is None: - old_attr = name - self.attr = old_attr - - def _resolve(self): - module = _import_module(self.mod) - return getattr(module, self.attr) - - -class _SixMetaPathImporter(object): - - """ - A meta path importer to import six.moves and its submodules. - - This class implements a PEP302 finder and loader. It should be compatible - with Python 2.5 and all existing versions of Python3 - """ - - def __init__(self, six_module_name): - self.name = six_module_name - self.known_modules = {} - - def _add_module(self, mod, *fullnames): - for fullname in fullnames: - self.known_modules[self.name + "." + fullname] = mod - - def _get_module(self, fullname): - return self.known_modules[self.name + "." 
+ fullname] - - def find_module(self, fullname, path=None): - if fullname in self.known_modules: - return self - return None - - def __get_module(self, fullname): - try: - return self.known_modules[fullname] - except KeyError: - raise ImportError("This loader does not know module " + fullname) - - def load_module(self, fullname): - try: - # in case of a reload - return sys.modules[fullname] - except KeyError: - pass - mod = self.__get_module(fullname) - if isinstance(mod, MovedModule): - mod = mod._resolve() - else: - mod.__loader__ = self - sys.modules[fullname] = mod - return mod - - def is_package(self, fullname): - """ - Return true, if the named module is a package. - - We need this method to get correct spec objects with - Python 3.4 (see PEP451) - """ - return hasattr(self.__get_module(fullname), "__path__") - - def get_code(self, fullname): - """Return None - - Required, if is_package is implemented""" - self.__get_module(fullname) # eventually raises ImportError - return None - get_source = get_code # same as get_code - -_importer = _SixMetaPathImporter(__name__) - - -class _MovedItems(_LazyModule): - - """Lazy loading of moved objects""" - __path__ = [] # mark as package - - -_moved_attributes = [ - MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), - MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), - MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), - MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), - MovedAttribute("intern", "__builtin__", "sys"), - MovedAttribute("map", "itertools", "builtins", "imap", "map"), - MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), - MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), - MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), - MovedAttribute("reduce", "__builtin__", "functools"), - MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), - MovedAttribute("StringIO", "StringIO", "io"), - MovedAttribute("UserDict", "UserDict", "collections"), - MovedAttribute("UserList", "UserList", "collections"), - MovedAttribute("UserString", "UserString", "collections"), - MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), - MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), - MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), - MovedModule("builtins", "__builtin__"), - MovedModule("configparser", "ConfigParser"), - MovedModule("copyreg", "copy_reg"), - MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), - MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), - MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), - MovedModule("http_cookies", "Cookie", "http.cookies"), - MovedModule("html_entities", "htmlentitydefs", "html.entities"), - MovedModule("html_parser", "HTMLParser", "html.parser"), - MovedModule("http_client", "httplib", "http.client"), - MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), - MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), - MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), - MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), - MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), - MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), - MovedModule("SimpleHTTPServer", "SimpleHTTPServer", 
"http.server"), - MovedModule("cPickle", "cPickle", "pickle"), - MovedModule("queue", "Queue"), - MovedModule("reprlib", "repr"), - MovedModule("socketserver", "SocketServer"), - MovedModule("_thread", "thread", "_thread"), - MovedModule("tkinter", "Tkinter"), - MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), - MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), - MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), - MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), - MovedModule("tkinter_tix", "Tix", "tkinter.tix"), - MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), - MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), - MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), - MovedModule("tkinter_colorchooser", "tkColorChooser", - "tkinter.colorchooser"), - MovedModule("tkinter_commondialog", "tkCommonDialog", - "tkinter.commondialog"), - MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), - MovedModule("tkinter_font", "tkFont", "tkinter.font"), - MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), - MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", - "tkinter.simpledialog"), - MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), - MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), - MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), - MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), - MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), - MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), -] -# Add windows specific modules. -if sys.platform == "win32": - _moved_attributes += [ - MovedModule("winreg", "_winreg"), - ] - -for attr in _moved_attributes: - setattr(_MovedItems, attr.name, attr) - if isinstance(attr, MovedModule): - _importer._add_module(attr, "moves." 
+ attr.name) -del attr - -_MovedItems._moved_attributes = _moved_attributes - -moves = _MovedItems(__name__ + ".moves") -_importer._add_module(moves, "moves") - - -class Module_six_moves_urllib_parse(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_parse""" - - -_urllib_parse_moved_attributes = [ - MovedAttribute("ParseResult", "urlparse", "urllib.parse"), - MovedAttribute("SplitResult", "urlparse", "urllib.parse"), - MovedAttribute("parse_qs", "urlparse", "urllib.parse"), - MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), - MovedAttribute("urldefrag", "urlparse", "urllib.parse"), - MovedAttribute("urljoin", "urlparse", "urllib.parse"), - MovedAttribute("urlparse", "urlparse", "urllib.parse"), - MovedAttribute("urlsplit", "urlparse", "urllib.parse"), - MovedAttribute("urlunparse", "urlparse", "urllib.parse"), - MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), - MovedAttribute("quote", "urllib", "urllib.parse"), - MovedAttribute("quote_plus", "urllib", "urllib.parse"), - MovedAttribute("unquote", "urllib", "urllib.parse"), - MovedAttribute("unquote_plus", "urllib", "urllib.parse"), - MovedAttribute("urlencode", "urllib", "urllib.parse"), - MovedAttribute("splitquery", "urllib", "urllib.parse"), - MovedAttribute("splittag", "urllib", "urllib.parse"), - MovedAttribute("splituser", "urllib", "urllib.parse"), - MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), - MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), - MovedAttribute("uses_params", "urlparse", "urllib.parse"), - MovedAttribute("uses_query", "urlparse", "urllib.parse"), - MovedAttribute("uses_relative", "urlparse", "urllib.parse"), -] -for attr in _urllib_parse_moved_attributes: - setattr(Module_six_moves_urllib_parse, attr.name, attr) -del attr - -Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes - -_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), - "moves.urllib_parse", "moves.urllib.parse") - - -class Module_six_moves_urllib_error(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_error""" - - -_urllib_error_moved_attributes = [ - MovedAttribute("URLError", "urllib2", "urllib.error"), - MovedAttribute("HTTPError", "urllib2", "urllib.error"), - MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), -] -for attr in _urllib_error_moved_attributes: - setattr(Module_six_moves_urllib_error, attr.name, attr) -del attr - -Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes - -_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), - "moves.urllib_error", "moves.urllib.error") - - -class Module_six_moves_urllib_request(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_request""" - - -_urllib_request_moved_attributes = [ - MovedAttribute("urlopen", "urllib2", "urllib.request"), - MovedAttribute("install_opener", "urllib2", "urllib.request"), - MovedAttribute("build_opener", "urllib2", "urllib.request"), - MovedAttribute("pathname2url", "urllib", "urllib.request"), - MovedAttribute("url2pathname", "urllib", "urllib.request"), - MovedAttribute("getproxies", "urllib", "urllib.request"), - MovedAttribute("Request", "urllib2", "urllib.request"), - MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), - MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPCookieProcessor", "urllib2", 
"urllib.request"), - MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), - MovedAttribute("BaseHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), - MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), - MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), - MovedAttribute("FileHandler", "urllib2", "urllib.request"), - MovedAttribute("FTPHandler", "urllib2", "urllib.request"), - MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), - MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), - MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), - MovedAttribute("urlretrieve", "urllib", "urllib.request"), - MovedAttribute("urlcleanup", "urllib", "urllib.request"), - MovedAttribute("URLopener", "urllib", "urllib.request"), - MovedAttribute("FancyURLopener", "urllib", "urllib.request"), - MovedAttribute("proxy_bypass", "urllib", "urllib.request"), -] -for attr in _urllib_request_moved_attributes: - setattr(Module_six_moves_urllib_request, attr.name, attr) -del attr - -Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes - -_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), - "moves.urllib_request", "moves.urllib.request") - - -class Module_six_moves_urllib_response(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_response""" - - -_urllib_response_moved_attributes = [ - MovedAttribute("addbase", "urllib", "urllib.response"), - MovedAttribute("addclosehook", "urllib", "urllib.response"), - MovedAttribute("addinfo", "urllib", "urllib.response"), - MovedAttribute("addinfourl", "urllib", "urllib.response"), -] -for attr in _urllib_response_moved_attributes: - setattr(Module_six_moves_urllib_response, attr.name, attr) -del attr - -Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes - -_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), - "moves.urllib_response", "moves.urllib.response") - - -class Module_six_moves_urllib_robotparser(_LazyModule): - - """Lazy loading of moved objects in six.moves.urllib_robotparser""" - - -_urllib_robotparser_moved_attributes = [ - MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), -] -for attr in _urllib_robotparser_moved_attributes: - setattr(Module_six_moves_urllib_robotparser, attr.name, attr) -del attr - -Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes - -_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), - "moves.urllib_robotparser", "moves.urllib.robotparser") - - -class Module_six_moves_urllib(types.ModuleType): - - """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" - __path__ = [] # mark as package - parse = _importer._get_module("moves.urllib_parse") - error = _importer._get_module("moves.urllib_error") - request = 
_importer._get_module("moves.urllib_request") - response = _importer._get_module("moves.urllib_response") - robotparser = _importer._get_module("moves.urllib_robotparser") - - def __dir__(self): - return ['parse', 'error', 'request', 'response', 'robotparser'] - -_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), - "moves.urllib") - - -def add_move(move): - """Add an item to six.moves.""" - setattr(_MovedItems, move.name, move) - - -def remove_move(name): - """Remove item from six.moves.""" - try: - delattr(_MovedItems, name) - except AttributeError: - try: - del moves.__dict__[name] - except KeyError: - raise AttributeError("no such move, %r" % (name,)) - - -if PY3: - _meth_func = "__func__" - _meth_self = "__self__" - - _func_closure = "__closure__" - _func_code = "__code__" - _func_defaults = "__defaults__" - _func_globals = "__globals__" -else: - _meth_func = "im_func" - _meth_self = "im_self" - - _func_closure = "func_closure" - _func_code = "func_code" - _func_defaults = "func_defaults" - _func_globals = "func_globals" - - -try: - advance_iterator = next -except NameError: - def advance_iterator(it): - return it.next() -next = advance_iterator - - -try: - callable = callable -except NameError: - def callable(obj): - return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) - - -if PY3: - def get_unbound_function(unbound): - return unbound - - create_bound_method = types.MethodType - - def create_unbound_method(func, cls): - return func - - Iterator = object -else: - def get_unbound_function(unbound): - return unbound.im_func - - def create_bound_method(func, obj): - return types.MethodType(func, obj, obj.__class__) - - def create_unbound_method(func, cls): - return types.MethodType(func, None, cls) - - class Iterator(object): - - def next(self): - return type(self).__next__(self) - - callable = callable -_add_doc(get_unbound_function, - """Get the function out of a possibly unbound function""") - - -get_method_function = operator.attrgetter(_meth_func) -get_method_self = operator.attrgetter(_meth_self) -get_function_closure = operator.attrgetter(_func_closure) -get_function_code = operator.attrgetter(_func_code) -get_function_defaults = operator.attrgetter(_func_defaults) -get_function_globals = operator.attrgetter(_func_globals) - - -if PY3: - def iterkeys(d, **kw): - return iter(d.keys(**kw)) - - def itervalues(d, **kw): - return iter(d.values(**kw)) - - def iteritems(d, **kw): - return iter(d.items(**kw)) - - def iterlists(d, **kw): - return iter(d.lists(**kw)) - - viewkeys = operator.methodcaller("keys") - - viewvalues = operator.methodcaller("values") - - viewitems = operator.methodcaller("items") -else: - def iterkeys(d, **kw): - return d.iterkeys(**kw) - - def itervalues(d, **kw): - return d.itervalues(**kw) - - def iteritems(d, **kw): - return d.iteritems(**kw) - - def iterlists(d, **kw): - return d.iterlists(**kw) - - viewkeys = operator.methodcaller("viewkeys") - - viewvalues = operator.methodcaller("viewvalues") - - viewitems = operator.methodcaller("viewitems") - -_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") -_add_doc(itervalues, "Return an iterator over the values of a dictionary.") -_add_doc(iteritems, - "Return an iterator over the (key, value) pairs of a dictionary.") -_add_doc(iterlists, - "Return an iterator over the (key, [values]) pairs of a dictionary.") - - -if PY3: - def b(s): - return s.encode("latin-1") - - def u(s): - return s - unichr = chr - import struct - int2byte = struct.Struct(">B").pack 
- del struct - byte2int = operator.itemgetter(0) - indexbytes = operator.getitem - iterbytes = iter - import io - StringIO = io.StringIO - BytesIO = io.BytesIO - _assertCountEqual = "assertCountEqual" - if sys.version_info[1] <= 1: - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" - else: - _assertRaisesRegex = "assertRaisesRegex" - _assertRegex = "assertRegex" -else: - def b(s): - return s - # Workaround for standalone backslash - - def u(s): - return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") - unichr = unichr - int2byte = chr - - def byte2int(bs): - return ord(bs[0]) - - def indexbytes(buf, i): - return ord(buf[i]) - iterbytes = functools.partial(itertools.imap, ord) - import StringIO - StringIO = BytesIO = StringIO.StringIO - _assertCountEqual = "assertItemsEqual" - _assertRaisesRegex = "assertRaisesRegexp" - _assertRegex = "assertRegexpMatches" -_add_doc(b, """Byte literal""") -_add_doc(u, """Text literal""") - - -def assertCountEqual(self, *args, **kwargs): - return getattr(self, _assertCountEqual)(*args, **kwargs) - - -def assertRaisesRegex(self, *args, **kwargs): - return getattr(self, _assertRaisesRegex)(*args, **kwargs) - - -def assertRegex(self, *args, **kwargs): - return getattr(self, _assertRegex)(*args, **kwargs) - - -if PY3: - exec_ = getattr(moves.builtins, "exec") - - def reraise(tp, value, tb=None): - if value is None: - value = tp() - if value.__traceback__ is not tb: - raise value.with_traceback(tb) - raise value - -else: - def exec_(_code_, _globs_=None, _locs_=None): - """Execute code in a namespace.""" - if _globs_ is None: - frame = sys._getframe(1) - _globs_ = frame.f_globals - if _locs_ is None: - _locs_ = frame.f_locals - del frame - elif _locs_ is None: - _locs_ = _globs_ - exec("""exec _code_ in _globs_, _locs_""") - - exec_("""def reraise(tp, value, tb=None): - raise tp, value, tb -""") - - -if sys.version_info[:2] == (3, 2): - exec_("""def raise_from(value, from_value): - if from_value is None: - raise value - raise value from from_value -""") -elif sys.version_info[:2] > (3, 2): - exec_("""def raise_from(value, from_value): - raise value from from_value -""") -else: - def raise_from(value, from_value): - raise value - - -print_ = getattr(moves.builtins, "print", None) -if print_ is None: - def print_(*args, **kwargs): - """The new-style print function for Python 2.4 and 2.5.""" - fp = kwargs.pop("file", sys.stdout) - if fp is None: - return - - def write(data): - if not isinstance(data, basestring): - data = str(data) - # If the file has an encoding, encode unicode with it. 
- if (isinstance(fp, file) and - isinstance(data, unicode) and - fp.encoding is not None): - errors = getattr(fp, "errors", None) - if errors is None: - errors = "strict" - data = data.encode(fp.encoding, errors) - fp.write(data) - want_unicode = False - sep = kwargs.pop("sep", None) - if sep is not None: - if isinstance(sep, unicode): - want_unicode = True - elif not isinstance(sep, str): - raise TypeError("sep must be None or a string") - end = kwargs.pop("end", None) - if end is not None: - if isinstance(end, unicode): - want_unicode = True - elif not isinstance(end, str): - raise TypeError("end must be None or a string") - if kwargs: - raise TypeError("invalid keyword arguments to print()") - if not want_unicode: - for arg in args: - if isinstance(arg, unicode): - want_unicode = True - break - if want_unicode: - newline = unicode("\n") - space = unicode(" ") - else: - newline = "\n" - space = " " - if sep is None: - sep = space - if end is None: - end = newline - for i, arg in enumerate(args): - if i: - write(sep) - write(arg) - write(end) -if sys.version_info[:2] < (3, 3): - _print = print_ - - def print_(*args, **kwargs): - fp = kwargs.get("file", sys.stdout) - flush = kwargs.pop("flush", False) - _print(*args, **kwargs) - if flush and fp is not None: - fp.flush() - -_add_doc(reraise, """Reraise an exception.""") - -if sys.version_info[0:2] < (3, 4): - def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, - updated=functools.WRAPPER_UPDATES): - def wrapper(f): - f = functools.wraps(wrapped, assigned, updated)(f) - f.__wrapped__ = wrapped - return f - return wrapper -else: - wraps = functools.wraps - - -def with_metaclass(meta, *bases): - """Create a base class with a metaclass.""" - # This requires a bit of explanation: the basic idea is to make a dummy - # metaclass for one level of class instantiation that replaces itself with - # the actual metaclass. - class metaclass(meta): - - def __new__(cls, name, this_bases, d): - return meta(name, bases, d) - return type.__new__(metaclass, 'temporary_class', (), {}) - - -def add_metaclass(metaclass): - """Class decorator for creating a class with a metaclass.""" - def wrapper(cls): - orig_vars = cls.__dict__.copy() - slots = orig_vars.get('__slots__') - if slots is not None: - if isinstance(slots, str): - slots = [slots] - for slots_var in slots: - orig_vars.pop(slots_var) - orig_vars.pop('__dict__', None) - orig_vars.pop('__weakref__', None) - return metaclass(cls.__name__, cls.__bases__, orig_vars) - return wrapper - - -def python_2_unicode_compatible(klass): - """ - A decorator that defines __unicode__ and __str__ methods under Python 2. - Under Python 3 it does nothing. - - To support Python 2 and 3 with a single code base, define a __str__ method - returning text and apply this decorator to the class. - """ - if PY2: - if '__str__' not in klass.__dict__: - raise ValueError("@python_2_unicode_compatible cannot be applied " - "to %s because it doesn't define __str__()." % - klass.__name__) - klass.__unicode__ = klass.__str__ - klass.__str__ = lambda self: self.__unicode__().encode('utf-8') - return klass - - -# Complete the moves implementation. -# This code is at the end of this module to speed up module loading. -# Turn this module into a package. 
-__path__ = []  # required for PEP 302 and PEP 451
-__package__ = __name__  # see PEP 366 @ReservedAssignment
-if globals().get("__spec__") is not None:
-    __spec__.submodule_search_locations = []  # PEP 451 @UndefinedVariable
-# Remove other six meta path importers, since they cause problems. This can
-# happen if six is removed from sys.modules and then reloaded. (Setuptools does
-# this for some reason.)
-if sys.meta_path:
-    for i, importer in enumerate(sys.meta_path):
-        # Here's some real nastiness: Another "instance" of the six module might
-        # be floating around. Therefore, we can't use isinstance() to check for
-        # the six meta path importer, since the other six instance will have
-        # inserted an importer with different class.
-        if (type(importer).__name__ == "_SixMetaPathImporter" and
-                importer.name == __name__):
-            del sys.meta_path[i]
-            break
-    del i, importer
-# Finally, add the importer to the meta path import hook.
-sys.meta_path.append(_importer)
diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/archive_util.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/archive_util.py
deleted file mode 100644
index 8143604..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/setuptools/archive_util.py
+++ /dev/null
@@ -1,173 +0,0 @@
-"""Utilities for extracting common archive formats"""
-
-import zipfile
-import tarfile
-import os
-import shutil
-import posixpath
-import contextlib
-from distutils.errors import DistutilsError
-
-from pkg_resources import ensure_directory
-
-__all__ = [
-    "unpack_archive", "unpack_zipfile", "unpack_tarfile", "default_filter",
-    "UnrecognizedFormat", "extraction_drivers", "unpack_directory",
-]
-
-
-class UnrecognizedFormat(DistutilsError):
-    """Couldn't recognize the archive type"""
-
-
-def default_filter(src, dst):
-    """The default progress/filter callback; returns True for all files"""
-    return dst
-
-
-def unpack_archive(filename, extract_dir, progress_filter=default_filter,
-                   drivers=None):
-    """Unpack `filename` to `extract_dir`, or raise ``UnrecognizedFormat``
-
-    `progress_filter` is a function taking two arguments: a source path
-    internal to the archive ('/'-separated), and a filesystem path where it
-    will be extracted. The callback must return the desired extract path
-    (which may be the same as the one passed in), or else ``None`` to skip
-    that file or directory. The callback can thus be used to report on the
-    progress of the extraction, as well as to filter the items extracted or
-    alter their extraction paths.
-
-    `drivers`, if supplied, must be a non-empty sequence of functions with the
-    same signature as this function (minus the `drivers` argument), that raise
-    ``UnrecognizedFormat`` if they do not support extracting the designated
-    archive type. The `drivers` are tried in sequence until one is found that
-    does not raise an error, or until all are exhausted (in which case
-    ``UnrecognizedFormat`` is raised). If you do not supply a sequence of
-    drivers, the module's ``extraction_drivers`` constant will be used, which
-    means that ``unpack_zipfile`` and ``unpack_tarfile`` will be tried, in that
-    order.
- """ - for driver in drivers or extraction_drivers: - try: - driver(filename, extract_dir, progress_filter) - except UnrecognizedFormat: - continue - else: - return - else: - raise UnrecognizedFormat( - "Not a recognized archive type: %s" % filename - ) - - -def unpack_directory(filename, extract_dir, progress_filter=default_filter): - """"Unpack" a directory, using the same interface as for archives - - Raises ``UnrecognizedFormat`` if `filename` is not a directory - """ - if not os.path.isdir(filename): - raise UnrecognizedFormat("%s is not a directory" % filename) - - paths = { - filename: ('', extract_dir), - } - for base, dirs, files in os.walk(filename): - src, dst = paths[base] - for d in dirs: - paths[os.path.join(base, d)] = src + d + '/', os.path.join(dst, d) - for f in files: - target = os.path.join(dst, f) - target = progress_filter(src + f, target) - if not target: - # skip non-files - continue - ensure_directory(target) - f = os.path.join(base, f) - shutil.copyfile(f, target) - shutil.copystat(f, target) - - -def unpack_zipfile(filename, extract_dir, progress_filter=default_filter): - """Unpack zip `filename` to `extract_dir` - - Raises ``UnrecognizedFormat`` if `filename` is not a zipfile (as determined - by ``zipfile.is_zipfile()``). See ``unpack_archive()`` for an explanation - of the `progress_filter` argument. - """ - - if not zipfile.is_zipfile(filename): - raise UnrecognizedFormat("%s is not a zip file" % (filename,)) - - with zipfile.ZipFile(filename) as z: - for info in z.infolist(): - name = info.filename - - # don't extract absolute paths or ones with .. in them - if name.startswith('/') or '..' in name.split('/'): - continue - - target = os.path.join(extract_dir, *name.split('/')) - target = progress_filter(name, target) - if not target: - continue - if name.endswith('/'): - # directory - ensure_directory(target) - else: - # file - ensure_directory(target) - data = z.read(info.filename) - with open(target, 'wb') as f: - f.write(data) - unix_attributes = info.external_attr >> 16 - if unix_attributes: - os.chmod(target, unix_attributes) - - -def unpack_tarfile(filename, extract_dir, progress_filter=default_filter): - """Unpack tar/tar.gz/tar.bz2 `filename` to `extract_dir` - - Raises ``UnrecognizedFormat`` if `filename` is not a tarfile (as determined - by ``tarfile.open()``). See ``unpack_archive()`` for an explanation - of the `progress_filter` argument. - """ - try: - tarobj = tarfile.open(filename) - except tarfile.TarError: - raise UnrecognizedFormat( - "%s is not a compressed or uncompressed tar file" % (filename,) - ) - with contextlib.closing(tarobj): - # don't do any chowning! - tarobj.chown = lambda *args: None - for member in tarobj: - name = member.name - # don't extract absolute paths or ones with .. in them - if not name.startswith('/') and '..' 
not in name.split('/'): - prelim_dst = os.path.join(extract_dir, *name.split('/')) - - # resolve any links and to extract the link targets as normal - # files - while member is not None and (member.islnk() or member.issym()): - linkpath = member.linkname - if member.issym(): - base = posixpath.dirname(member.name) - linkpath = posixpath.join(base, linkpath) - linkpath = posixpath.normpath(linkpath) - member = tarobj._getmember(linkpath) - - if member is not None and (member.isfile() or member.isdir()): - final_dst = progress_filter(name, prelim_dst) - if final_dst: - if final_dst.endswith(os.sep): - final_dst = final_dst[:-1] - try: - # XXX Ugh - tarobj._extract_member(member, final_dst) - except tarfile.ExtractError: - # chown/chmod/mkfifo/mknode/makedev failed - pass - return True - - -extraction_drivers = unpack_directory, unpack_zipfile, unpack_tarfile diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/build_meta.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/build_meta.py deleted file mode 100644 index 609ea1e..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/build_meta.py +++ /dev/null @@ -1,172 +0,0 @@ -"""A PEP 517 interface to setuptools - -Previously, when a user or a command line tool (let's call it a "frontend") -needed to make a request of setuptools to take a certain action, for -example, generating a list of installation requirements, the frontend would -would call "setup.py egg_info" or "setup.py bdist_wheel" on the command line. - -PEP 517 defines a different method of interfacing with setuptools. Rather -than calling "setup.py" directly, the frontend should: - - 1. Set the current directory to the directory with a setup.py file - 2. Import this module into a safe python interpreter (one in which - setuptools can potentially set global variables or crash hard). - 3. Call one of the functions defined in PEP 517. - -What each function does is defined in PEP 517. However, here is a "casual" -definition of the functions (this definition should not be relied on for -bug reports or API stability): - - - `build_wheel`: build a wheel in the folder and return the basename - - `get_requires_for_build_wheel`: get the `setup_requires` to build - - `prepare_metadata_for_build_wheel`: get the `install_requires` - - `build_sdist`: build an sdist in the folder and return the basename - - `get_requires_for_build_sdist`: get the `setup_requires` to build - -Again, this is not a formal definition! Just a "taste" of the module. -""" - -import os -import sys -import tokenize -import shutil -import contextlib - -import setuptools -import distutils - - -class SetupRequirementsError(BaseException): - def __init__(self, specifiers): - self.specifiers = specifiers - - -class Distribution(setuptools.dist.Distribution): - def fetch_build_eggs(self, specifiers): - raise SetupRequirementsError(specifiers) - - @classmethod - @contextlib.contextmanager - def patch(cls): - """ - Replace - distutils.dist.Distribution with this class - for the duration of this context. 
- """ - orig = distutils.core.Distribution - distutils.core.Distribution = cls - try: - yield - finally: - distutils.core.Distribution = orig - - -def _run_setup(setup_script='setup.py'): - # Note that we can reuse our build directory between calls - # Correctness comes first, then optimization later - __file__ = setup_script - __name__ = '__main__' - f = getattr(tokenize, 'open', open)(__file__) - code = f.read().replace('\\r\\n', '\\n') - f.close() - exec(compile(code, __file__, 'exec'), locals()) - - -def _fix_config(config_settings): - config_settings = config_settings or {} - config_settings.setdefault('--global-option', []) - return config_settings - - -def _get_build_requires(config_settings): - config_settings = _fix_config(config_settings) - requirements = ['setuptools', 'wheel'] - - sys.argv = sys.argv[:1] + ['egg_info'] + \ - config_settings["--global-option"] - try: - with Distribution.patch(): - _run_setup() - except SetupRequirementsError as e: - requirements += e.specifiers - - return requirements - - -def _get_immediate_subdirectories(a_dir): - return [name for name in os.listdir(a_dir) - if os.path.isdir(os.path.join(a_dir, name))] - - -def get_requires_for_build_wheel(config_settings=None): - config_settings = _fix_config(config_settings) - return _get_build_requires(config_settings) - - -def get_requires_for_build_sdist(config_settings=None): - config_settings = _fix_config(config_settings) - return _get_build_requires(config_settings) - - -def prepare_metadata_for_build_wheel(metadata_directory, config_settings=None): - sys.argv = sys.argv[:1] + ['dist_info', '--egg-base', metadata_directory] - _run_setup() - - dist_info_directory = metadata_directory - while True: - dist_infos = [f for f in os.listdir(dist_info_directory) - if f.endswith('.dist-info')] - - if len(dist_infos) == 0 and \ - len(_get_immediate_subdirectories(dist_info_directory)) == 1: - dist_info_directory = os.path.join( - dist_info_directory, os.listdir(dist_info_directory)[0]) - continue - - assert len(dist_infos) == 1 - break - - # PEP 517 requires that the .dist-info directory be placed in the - # metadata_directory. 
To comply, we MUST copy the directory to the root - if dist_info_directory != metadata_directory: - shutil.move( - os.path.join(dist_info_directory, dist_infos[0]), - metadata_directory) - shutil.rmtree(dist_info_directory, ignore_errors=True) - - return dist_infos[0] - - -def build_wheel(wheel_directory, config_settings=None, - metadata_directory=None): - config_settings = _fix_config(config_settings) - wheel_directory = os.path.abspath(wheel_directory) - sys.argv = sys.argv[:1] + ['bdist_wheel'] + \ - config_settings["--global-option"] - _run_setup() - if wheel_directory != 'dist': - shutil.rmtree(wheel_directory) - shutil.copytree('dist', wheel_directory) - - wheels = [f for f in os.listdir(wheel_directory) - if f.endswith('.whl')] - - assert len(wheels) == 1 - return wheels[0] - - -def build_sdist(sdist_directory, config_settings=None): - config_settings = _fix_config(config_settings) - sdist_directory = os.path.abspath(sdist_directory) - sys.argv = sys.argv[:1] + ['sdist'] + \ - config_settings["--global-option"] - _run_setup() - if sdist_directory != 'dist': - shutil.rmtree(sdist_directory) - shutil.copytree('dist', sdist_directory) - - sdists = [f for f in os.listdir(sdist_directory) - if f.endswith('.tar.gz')] - - assert len(sdists) == 1 - return sdists[0] diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/cli-32.exe b/classifier/myenv/lib/python3.6/site-packages/setuptools/cli-32.exe deleted file mode 100644 index b1487b7..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/cli-32.exe and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/cli-64.exe b/classifier/myenv/lib/python3.6/site-packages/setuptools/cli-64.exe deleted file mode 100644 index 675e6bf..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/cli-64.exe and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/cli.exe b/classifier/myenv/lib/python3.6/site-packages/setuptools/cli.exe deleted file mode 100644 index b1487b7..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/cli.exe and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__init__.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__init__.py deleted file mode 100644 index fe619e2..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__init__.py +++ /dev/null @@ -1,18 +0,0 @@ -__all__ = [ - 'alias', 'bdist_egg', 'bdist_rpm', 'build_ext', 'build_py', 'develop', - 'easy_install', 'egg_info', 'install', 'install_lib', 'rotate', 'saveopts', - 'sdist', 'setopt', 'test', 'install_egg_info', 'install_scripts', - 'register', 'bdist_wininst', 'upload_docs', 'upload', 'build_clib', - 'dist_info', -] - -from distutils.command.bdist import bdist -import sys - -from setuptools.command import install_scripts - -if 'egg' not in bdist.format_commands: - bdist.format_command['egg'] = ('bdist_egg', "Python .egg file") - bdist.format_commands.append('egg') - -del bdist, sys diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index b06de61..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git 
a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/alias.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/alias.cpython-36.pyc deleted file mode 100644 index 108ad55..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/alias.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/bdist_egg.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/bdist_egg.cpython-36.pyc deleted file mode 100644 index c5154af..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/bdist_egg.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/bdist_rpm.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/bdist_rpm.cpython-36.pyc deleted file mode 100644 index 036ed74..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/bdist_rpm.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/bdist_wininst.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/bdist_wininst.cpython-36.pyc deleted file mode 100644 index 1bfb707..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/bdist_wininst.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/build_clib.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/build_clib.cpython-36.pyc deleted file mode 100644 index fd25192..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/build_clib.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/build_ext.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/build_ext.cpython-36.pyc deleted file mode 100644 index de89cfa..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/build_ext.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/build_py.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/build_py.cpython-36.pyc deleted file mode 100644 index 993c3d6..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/build_py.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/develop.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/develop.cpython-36.pyc deleted file mode 100644 index f0e0977..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/develop.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/dist_info.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/dist_info.cpython-36.pyc deleted file mode 100644 index d0abd37..0000000 Binary files 
a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/dist_info.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/easy_install.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/easy_install.cpython-36.pyc deleted file mode 100644 index 9e49976..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/easy_install.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/egg_info.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/egg_info.cpython-36.pyc deleted file mode 100644 index d17eb58..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/egg_info.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/install.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/install.cpython-36.pyc deleted file mode 100644 index 9b37132..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/install.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/install_egg_info.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/install_egg_info.cpython-36.pyc deleted file mode 100644 index 248b7f1..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/install_egg_info.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/install_lib.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/install_lib.cpython-36.pyc deleted file mode 100644 index 0277df4..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/install_lib.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/install_scripts.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/install_scripts.cpython-36.pyc deleted file mode 100644 index 50837e8..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/install_scripts.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/py36compat.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/py36compat.cpython-36.pyc deleted file mode 100644 index 8bb2a45..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/py36compat.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/register.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/register.cpython-36.pyc deleted file mode 100644 index 341a048..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/register.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/rotate.cpython-36.pyc 
b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/rotate.cpython-36.pyc deleted file mode 100644 index 0dc9be7..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/rotate.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/saveopts.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/saveopts.cpython-36.pyc deleted file mode 100644 index b4d9dd9..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/saveopts.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/sdist.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/sdist.cpython-36.pyc deleted file mode 100644 index 9f14e09..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/sdist.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/setopt.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/setopt.cpython-36.pyc deleted file mode 100644 index 2f9b7de..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/setopt.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/test.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/test.cpython-36.pyc deleted file mode 100644 index f286f9c..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/test.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/upload.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/upload.cpython-36.pyc deleted file mode 100644 index 1753479..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/upload.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/upload_docs.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/upload_docs.cpython-36.pyc deleted file mode 100644 index 433f634..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/__pycache__/upload_docs.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/alias.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/alias.py deleted file mode 100644 index 4532b1c..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/alias.py +++ /dev/null @@ -1,80 +0,0 @@ -from distutils.errors import DistutilsOptionError - -from setuptools.extern.six.moves import map - -from setuptools.command.setopt import edit_config, option_base, config_file - - -def shquote(arg): - """Quote an argument for later parsing by shlex.split()""" - for c in '"', "'", "\\", "#": - if c in arg: - return repr(arg) - if arg.split() != [arg]: - return repr(arg) - return arg - - -class alias(option_base): - """Define a shortcut that invokes one or more commands""" - - description = "define a shortcut to invoke one or more commands" - command_consumes_arguments = True - 
- user_options = [ - ('remove', 'r', 'remove (unset) the alias'), - ] + option_base.user_options - - boolean_options = option_base.boolean_options + ['remove'] - - def initialize_options(self): - option_base.initialize_options(self) - self.args = None - self.remove = None - - def finalize_options(self): - option_base.finalize_options(self) - if self.remove and len(self.args) != 1: - raise DistutilsOptionError( - "Must specify exactly one argument (the alias name) when " - "using --remove" - ) - - def run(self): - aliases = self.distribution.get_option_dict('aliases') - - if not self.args: - print("Command Aliases") - print("---------------") - for alias in aliases: - print("setup.py alias", format_alias(alias, aliases)) - return - - elif len(self.args) == 1: - alias, = self.args - if self.remove: - command = None - elif alias in aliases: - print("setup.py alias", format_alias(alias, aliases)) - return - else: - print("No alias definition found for %r" % alias) - return - else: - alias = self.args[0] - command = ' '.join(map(shquote, self.args[1:])) - - edit_config(self.filename, {'aliases': {alias: command}}, self.dry_run) - - -def format_alias(name, aliases): - source, command = aliases[name] - if source == config_file('global'): - source = '--global-config ' - elif source == config_file('user'): - source = '--user-config ' - elif source == config_file('local'): - source = '' - else: - source = '--filename=%r' % source - return source + name + ' ' + command diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/bdist_egg.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/bdist_egg.py deleted file mode 100644 index 423b818..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/bdist_egg.py +++ /dev/null @@ -1,502 +0,0 @@ -"""setuptools.command.bdist_egg - -Build .egg distributions""" - -from distutils.errors import DistutilsSetupError -from distutils.dir_util import remove_tree, mkpath -from distutils import log -from types import CodeType -import sys -import os -import re -import textwrap -import marshal - -from setuptools.extern import six - -from pkg_resources import get_build_platform, Distribution, ensure_directory -from pkg_resources import EntryPoint -from setuptools.extension import Library -from setuptools import Command - -try: - # Python 2.7 or >=3.2 - from sysconfig import get_path, get_python_version - - def _get_purelib(): - return get_path("purelib") -except ImportError: - from distutils.sysconfig import get_python_lib, get_python_version - - def _get_purelib(): - return get_python_lib(False) - - -def strip_module(filename): - if '.' 
in filename: - filename = os.path.splitext(filename)[0] - if filename.endswith('module'): - filename = filename[:-6] - return filename - - -def sorted_walk(dir): - """Do os.walk in a reproducible way, - independent of indeterministic filesystem readdir order - """ - for base, dirs, files in os.walk(dir): - dirs.sort() - files.sort() - yield base, dirs, files - - -def write_stub(resource, pyfile): - _stub_template = textwrap.dedent(""" - def __bootstrap__(): - global __bootstrap__, __loader__, __file__ - import sys, pkg_resources, imp - __file__ = pkg_resources.resource_filename(__name__, %r) - __loader__ = None; del __bootstrap__, __loader__ - imp.load_dynamic(__name__,__file__) - __bootstrap__() - """).lstrip() - with open(pyfile, 'w') as f: - f.write(_stub_template % resource) - - -class bdist_egg(Command): - description = "create an \"egg\" distribution" - - user_options = [ - ('bdist-dir=', 'b', - "temporary directory for creating the distribution"), - ('plat-name=', 'p', "platform name to embed in generated filenames " - "(default: %s)" % get_build_platform()), - ('exclude-source-files', None, - "remove all .py files from the generated egg"), - ('keep-temp', 'k', - "keep the pseudo-installation tree around after " + - "creating the distribution archive"), - ('dist-dir=', 'd', - "directory to put final built distributions in"), - ('skip-build', None, - "skip rebuilding everything (for testing/debugging)"), - ] - - boolean_options = [ - 'keep-temp', 'skip-build', 'exclude-source-files' - ] - - def initialize_options(self): - self.bdist_dir = None - self.plat_name = None - self.keep_temp = 0 - self.dist_dir = None - self.skip_build = 0 - self.egg_output = None - self.exclude_source_files = None - - def finalize_options(self): - ei_cmd = self.ei_cmd = self.get_finalized_command("egg_info") - self.egg_info = ei_cmd.egg_info - - if self.bdist_dir is None: - bdist_base = self.get_finalized_command('bdist').bdist_base - self.bdist_dir = os.path.join(bdist_base, 'egg') - - if self.plat_name is None: - self.plat_name = get_build_platform() - - self.set_undefined_options('bdist', ('dist_dir', 'dist_dir')) - - if self.egg_output is None: - - # Compute filename of the output egg - basename = Distribution( - None, None, ei_cmd.egg_name, ei_cmd.egg_version, - get_python_version(), - self.distribution.has_ext_modules() and self.plat_name - ).egg_name() - - self.egg_output = os.path.join(self.dist_dir, basename + '.egg') - - def do_install_data(self): - # Hack for packages that install data to install's --install-lib - self.get_finalized_command('install').install_lib = self.bdist_dir - - site_packages = os.path.normcase(os.path.realpath(_get_purelib())) - old, self.distribution.data_files = self.distribution.data_files, [] - - for item in old: - if isinstance(item, tuple) and len(item) == 2: - if os.path.isabs(item[0]): - realpath = os.path.realpath(item[0]) - normalized = os.path.normcase(realpath) - if normalized == site_packages or normalized.startswith( - site_packages + os.sep - ): - item = realpath[len(site_packages) + 1:], item[1] - # XXX else: raise ??? 
- self.distribution.data_files.append(item) - - try: - log.info("installing package data to %s", self.bdist_dir) - self.call_command('install_data', force=0, root=None) - finally: - self.distribution.data_files = old - - def get_outputs(self): - return [self.egg_output] - - def call_command(self, cmdname, **kw): - """Invoke reinitialized command `cmdname` with keyword args""" - for dirname in INSTALL_DIRECTORY_ATTRS: - kw.setdefault(dirname, self.bdist_dir) - kw.setdefault('skip_build', self.skip_build) - kw.setdefault('dry_run', self.dry_run) - cmd = self.reinitialize_command(cmdname, **kw) - self.run_command(cmdname) - return cmd - - def run(self): - # Generate metadata first - self.run_command("egg_info") - # We run install_lib before install_data, because some data hacks - # pull their data path from the install_lib command. - log.info("installing library code to %s", self.bdist_dir) - instcmd = self.get_finalized_command('install') - old_root = instcmd.root - instcmd.root = None - if self.distribution.has_c_libraries() and not self.skip_build: - self.run_command('build_clib') - cmd = self.call_command('install_lib', warn_dir=0) - instcmd.root = old_root - - all_outputs, ext_outputs = self.get_ext_outputs() - self.stubs = [] - to_compile = [] - for (p, ext_name) in enumerate(ext_outputs): - filename, ext = os.path.splitext(ext_name) - pyfile = os.path.join(self.bdist_dir, strip_module(filename) + - '.py') - self.stubs.append(pyfile) - log.info("creating stub loader for %s", ext_name) - if not self.dry_run: - write_stub(os.path.basename(ext_name), pyfile) - to_compile.append(pyfile) - ext_outputs[p] = ext_name.replace(os.sep, '/') - - if to_compile: - cmd.byte_compile(to_compile) - if self.distribution.data_files: - self.do_install_data() - - # Make the EGG-INFO directory - archive_root = self.bdist_dir - egg_info = os.path.join(archive_root, 'EGG-INFO') - self.mkpath(egg_info) - if self.distribution.scripts: - script_dir = os.path.join(egg_info, 'scripts') - log.info("installing scripts to %s", script_dir) - self.call_command('install_scripts', install_dir=script_dir, - no_ep=1) - - self.copy_metadata_to(egg_info) - native_libs = os.path.join(egg_info, "native_libs.txt") - if all_outputs: - log.info("writing %s", native_libs) - if not self.dry_run: - ensure_directory(native_libs) - libs_file = open(native_libs, 'wt') - libs_file.write('\n'.join(all_outputs)) - libs_file.write('\n') - libs_file.close() - elif os.path.isfile(native_libs): - log.info("removing %s", native_libs) - if not self.dry_run: - os.unlink(native_libs) - - write_safety_flag( - os.path.join(archive_root, 'EGG-INFO'), self.zip_safe() - ) - - if os.path.exists(os.path.join(self.egg_info, 'depends.txt')): - log.warn( - "WARNING: 'depends.txt' will not be used by setuptools 0.6!\n" - "Use the install_requires/extras_require setup() args instead." 
- ) - - if self.exclude_source_files: - self.zap_pyfiles() - - # Make the archive - make_zipfile(self.egg_output, archive_root, verbose=self.verbose, - dry_run=self.dry_run, mode=self.gen_header()) - if not self.keep_temp: - remove_tree(self.bdist_dir, dry_run=self.dry_run) - - # Add to 'Distribution.dist_files' so that the "upload" command works - getattr(self.distribution, 'dist_files', []).append( - ('bdist_egg', get_python_version(), self.egg_output)) - - def zap_pyfiles(self): - log.info("Removing .py files from temporary directory") - for base, dirs, files in walk_egg(self.bdist_dir): - for name in files: - path = os.path.join(base, name) - - if name.endswith('.py'): - log.debug("Deleting %s", path) - os.unlink(path) - - if base.endswith('__pycache__'): - path_old = path - - pattern = r'(?P<name>.+)\.(?P<magic>[^.]+)\.pyc' - m = re.match(pattern, name) - path_new = os.path.join( - base, os.pardir, m.group('name') + '.pyc') - log.info( - "Renaming file from [%s] to [%s]" - % (path_old, path_new)) - try: - os.remove(path_new) - except OSError: - pass - os.rename(path_old, path_new) - - def zip_safe(self): - safe = getattr(self.distribution, 'zip_safe', None) - if safe is not None: - return safe - log.warn("zip_safe flag not set; analyzing archive contents...") - return analyze_egg(self.bdist_dir, self.stubs) - - def gen_header(self): - epm = EntryPoint.parse_map(self.distribution.entry_points or '') - ep = epm.get('setuptools.installation', {}).get('eggsecutable') - if ep is None: - return 'w' # not an eggsecutable, do it the usual way. - - if not ep.attrs or ep.extras: - raise DistutilsSetupError( - "eggsecutable entry point (%r) cannot have 'extras' " - "or refer to a module" % (ep,) - ) - - pyver = sys.version[:3] - pkg = ep.module_name - full = '.'.join(ep.attrs) - base = ep.attrs[0] - basename = os.path.basename(self.egg_output) - - header = ( - "#!/bin/sh\n" - 'if [ `basename $0` = "%(basename)s" ]\n' - 'then exec python%(pyver)s -c "' - "import sys, os; sys.path.insert(0, os.path.abspath('$0')); " - "from %(pkg)s import %(base)s; sys.exit(%(full)s())" - '" "$@"\n' - 'else\n' - ' echo $0 is not the correct name for this egg file.\n' - ' echo Please rename it back to %(basename)s and try again.\n' - ' exec false\n' - 'fi\n' - ) % locals() - - if not self.dry_run: - mkpath(os.path.dirname(self.egg_output), dry_run=self.dry_run) - f = open(self.egg_output, 'w') - f.write(header) - f.close() - return 'a' - - def copy_metadata_to(self, target_dir): - "Copy metadata (egg info) to the target_dir" - # normalize the path (so that a forward-slash in egg_info will - # match using startswith below) - norm_egg_info = os.path.normpath(self.egg_info) - prefix = os.path.join(norm_egg_info, '') - for path in self.ei_cmd.filelist.files: - if path.startswith(prefix): - target = os.path.join(target_dir, path[len(prefix):]) - ensure_directory(target) - self.copy_file(path, target) - - def get_ext_outputs(self): - """Get a list of relative paths to C extensions in the output distro""" - - all_outputs = [] - ext_outputs = [] - - paths = {self.bdist_dir: ''} - for base, dirs, files in sorted_walk(self.bdist_dir): - for filename in files: - if os.path.splitext(filename)[1].lower() in NATIVE_EXTENSIONS: - all_outputs.append(paths[base] + filename) - for filename in dirs: - paths[os.path.join(base, filename)] = (paths[base] + - filename + '/') - - if self.distribution.has_ext_modules(): - build_cmd = self.get_finalized_command('build_ext') - for ext in build_cmd.extensions: - if isinstance(ext, Library): - 
                    continue
-                fullname = build_cmd.get_ext_fullname(ext.name)
-                filename = build_cmd.get_ext_filename(fullname)
-                if not os.path.basename(filename).startswith('dl-'):
-                    if os.path.exists(os.path.join(self.bdist_dir, filename)):
-                        ext_outputs.append(filename)
-
-        return all_outputs, ext_outputs
-
-
-NATIVE_EXTENSIONS = dict.fromkeys('.dll .so .dylib .pyd'.split())
-
-
-def walk_egg(egg_dir):
-    """Walk an unpacked egg's contents, skipping the metadata directory"""
-    walker = sorted_walk(egg_dir)
-    base, dirs, files = next(walker)
-    if 'EGG-INFO' in dirs:
-        dirs.remove('EGG-INFO')
-    yield base, dirs, files
-    for bdf in walker:
-        yield bdf
-
-
-def analyze_egg(egg_dir, stubs):
-    # check for existing flag in EGG-INFO
-    for flag, fn in safety_flags.items():
-        if os.path.exists(os.path.join(egg_dir, 'EGG-INFO', fn)):
-            return flag
-    if not can_scan():
-        return False
-    safe = True
-    for base, dirs, files in walk_egg(egg_dir):
-        for name in files:
-            if name.endswith('.py') or name.endswith('.pyw'):
-                continue
-            elif name.endswith('.pyc') or name.endswith('.pyo'):
-                # always scan, even if we already know we're not safe
-                safe = scan_module(egg_dir, base, name, stubs) and safe
-    return safe
-
-
-def write_safety_flag(egg_dir, safe):
-    # Write or remove zip safety flag file(s)
-    for flag, fn in safety_flags.items():
-        fn = os.path.join(egg_dir, fn)
-        if os.path.exists(fn):
-            if safe is None or bool(safe) != flag:
-                os.unlink(fn)
-        elif safe is not None and bool(safe) == flag:
-            f = open(fn, 'wt')
-            f.write('\n')
-            f.close()
-
-
-safety_flags = {
-    True: 'zip-safe',
-    False: 'not-zip-safe',
-}
-
-
-def scan_module(egg_dir, base, name, stubs):
-    """Check whether module possibly uses unsafe-for-zipfile stuff"""
-
-    filename = os.path.join(base, name)
-    if filename[:-1] in stubs:
-        return True  # Extension module
-    pkg = base[len(egg_dir) + 1:].replace(os.sep, '.')
-    module = pkg + (pkg and '.' or '') + os.path.splitext(name)[0]
-    if sys.version_info < (3, 3):
-        skip = 8  # skip magic & date
-    elif sys.version_info < (3, 7):
-        skip = 12  # skip magic & date & file size
-    else:
-        skip = 16  # skip magic & reserved? & date & file size
-    f = open(filename, 'rb')
-    f.read(skip)
-    code = marshal.load(f)
-    f.close()
-    safe = True
-    symbols = dict.fromkeys(iter_symbols(code))
-    for bad in ['__file__', '__path__']:
-        if bad in symbols:
-            log.warn("%s: module references %s", module, bad)
-            safe = False
-    if 'inspect' in symbols:
-        for bad in [
-            'getsource', 'getabsfile', 'getsourcefile', 'getfile',
-            'getsourcelines', 'findsource', 'getcomments', 'getframeinfo',
-            'getinnerframes', 'getouterframes', 'stack', 'trace'
-        ]:
-            if bad in symbols:
-                log.warn("%s: module MAY be using inspect.%s", module, bad)
-                safe = False
-    return safe
-
-
-def iter_symbols(code):
-    """Yield names and strings used by `code` and its nested code objects"""
-    for name in code.co_names:
-        yield name
-    for const in code.co_consts:
-        if isinstance(const, six.string_types):
-            yield const
-        elif isinstance(const, CodeType):
-            for name in iter_symbols(const):
-                yield name
-
-
-def can_scan():
-    if not sys.platform.startswith('java') and sys.platform != 'cli':
-        # CPython, PyPy, etc.
-        return True
-    log.warn("Unable to analyze compiled code on this platform.")
-    log.warn("Please ask the author to include a 'zip_safe'"
-             " setting (either True or False) in the package's setup.py")
-
-
-# Attribute names of options for commands that might need to be convinced to
-# install to the egg build directory
-
-INSTALL_DIRECTORY_ATTRS = [
-    'install_lib', 'install_dir', 'install_data', 'install_base'
-]
-
-
-def make_zipfile(zip_filename, base_dir, verbose=0, dry_run=0, compress=True,
-                 mode='w'):
-    """Create a zip file from all the files under 'base_dir'. The output
-    zip file will be named 'base_dir' + ".zip". Uses either the "zipfile"
-    Python module (if available) or the InfoZIP "zip" utility (if installed
-    and found on the default search path). If neither tool is available,
-    raises DistutilsExecError. Returns the name of the output zip file.
-    """
-    import zipfile
-
-    mkpath(os.path.dirname(zip_filename), dry_run=dry_run)
-    log.info("creating '%s' and adding '%s' to it", zip_filename, base_dir)
-
-    def visit(z, dirname, names):
-        for name in names:
-            path = os.path.normpath(os.path.join(dirname, name))
-            if os.path.isfile(path):
-                p = path[len(base_dir) + 1:]
-                if not dry_run:
-                    z.write(path, p)
-                log.debug("adding '%s'", p)
-
-    compression = zipfile.ZIP_DEFLATED if compress else zipfile.ZIP_STORED
-    if not dry_run:
-        z = zipfile.ZipFile(zip_filename, mode, compression=compression)
-        for dirname, dirs, files in sorted_walk(base_dir):
-            visit(z, dirname, files)
-        z.close()
-    else:
-        for dirname, dirs, files in sorted_walk(base_dir):
-            visit(None, dirname, files)
-    return zip_filename
diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/bdist_rpm.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/bdist_rpm.py
deleted file mode 100644
index 7073092..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/bdist_rpm.py
+++ /dev/null
@@ -1,43 +0,0 @@
-import distutils.command.bdist_rpm as orig
-
-
-class bdist_rpm(orig.bdist_rpm):
-    """
-    Override the default bdist_rpm behavior to do the following:
-
-    1. Run egg_info to ensure the name and version are properly calculated.
-    2. Always run 'install' using --single-version-externally-managed to
-       disable eggs in RPM distributions.
-    3. Replace dash with underscore in the version numbers for better RPM
-       compatibility.
- """ - - def run(self): - # ensure distro name is up-to-date - self.run_command('egg_info') - - orig.bdist_rpm.run(self) - - def _make_spec_file(self): - version = self.distribution.get_version() - rpmversion = version.replace('-', '_') - spec = orig.bdist_rpm._make_spec_file(self) - line23 = '%define version ' + version - line24 = '%define version ' + rpmversion - spec = [ - line.replace( - "Source0: %{name}-%{version}.tar", - "Source0: %{name}-%{unmangled_version}.tar" - ).replace( - "setup.py install ", - "setup.py install --single-version-externally-managed " - ).replace( - "%setup", - "%setup -n %{name}-%{unmangled_version}" - ).replace(line23, line24) - for line in spec - ] - insert_loc = spec.index(line24) + 1 - unmangled_version = "%define unmangled_version " + version - spec.insert(insert_loc, unmangled_version) - return spec diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/bdist_wininst.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/bdist_wininst.py deleted file mode 100644 index 073de97..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/bdist_wininst.py +++ /dev/null @@ -1,21 +0,0 @@ -import distutils.command.bdist_wininst as orig - - -class bdist_wininst(orig.bdist_wininst): - def reinitialize_command(self, command, reinit_subcommands=0): - """ - Supplement reinitialize_command to work around - http://bugs.python.org/issue20819 - """ - cmd = self.distribution.reinitialize_command( - command, reinit_subcommands) - if command in ('install', 'install_lib'): - cmd.install_lib = None - return cmd - - def run(self): - self._is_running = True - try: - orig.bdist_wininst.run(self) - finally: - self._is_running = False diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/build_clib.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/build_clib.py deleted file mode 100644 index 09caff6..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/build_clib.py +++ /dev/null @@ -1,98 +0,0 @@ -import distutils.command.build_clib as orig -from distutils.errors import DistutilsSetupError -from distutils import log -from setuptools.dep_util import newer_pairwise_group - - -class build_clib(orig.build_clib): - """ - Override the default build_clib behaviour to do the following: - - 1. Implement a rudimentary timestamp-based dependency system - so 'compile()' doesn't run every time. - 2. Add more keys to the 'build_info' dictionary: - * obj_deps - specify dependencies for each object compiled. - this should be a dictionary mapping a key - with the source filename to a list of - dependencies. Use an empty string for global - dependencies. - * cflags - specify a list of additional flags to pass to - the compiler. - """ - - def build_libraries(self, libraries): - for (lib_name, build_info) in libraries: - sources = build_info.get('sources') - if sources is None or not isinstance(sources, (list, tuple)): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'sources' must be present and must be " - "a list of source filenames" % lib_name) - sources = list(sources) - - log.info("building '%s' library", lib_name) - - # Make sure everything is the correct type. - # obj_deps should be a dictionary of keys as sources - # and a list/tuple of files that are its dependencies. 
- obj_deps = build_info.get('obj_deps', dict()) - if not isinstance(obj_deps, dict): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'obj_deps' must be a dictionary of " - "type 'source: list'" % lib_name) - dependencies = [] - - # Get the global dependencies that are specified by the '' key. - # These will go into every source's dependency list. - global_deps = obj_deps.get('', list()) - if not isinstance(global_deps, (list, tuple)): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'obj_deps' must be a dictionary of " - "type 'source: list'" % lib_name) - - # Build the list to be used by newer_pairwise_group - # each source will be auto-added to its dependencies. - for source in sources: - src_deps = [source] - src_deps.extend(global_deps) - extra_deps = obj_deps.get(source, list()) - if not isinstance(extra_deps, (list, tuple)): - raise DistutilsSetupError( - "in 'libraries' option (library '%s'), " - "'obj_deps' must be a dictionary of " - "type 'source: list'" % lib_name) - src_deps.extend(extra_deps) - dependencies.append(src_deps) - - expected_objects = self.compiler.object_filenames( - sources, - output_dir=self.build_temp - ) - - if newer_pairwise_group(dependencies, expected_objects) != ([], []): - # First, compile the source code to object files in the library - # directory. (This should probably change to putting object - # files in a temporary build directory.) - macros = build_info.get('macros') - include_dirs = build_info.get('include_dirs') - cflags = build_info.get('cflags') - objects = self.compiler.compile( - sources, - output_dir=self.build_temp, - macros=macros, - include_dirs=include_dirs, - extra_postargs=cflags, - debug=self.debug - ) - - # Now "link" the object files together into a static library. - # (On Unix at least, this isn't really linking -- it just - # builds an archive. Whatever.) - self.compiler.create_static_lib( - expected_objects, - lib_name, - output_dir=self.build_clib, - debug=self.debug - ) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/build_ext.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/build_ext.py deleted file mode 100644 index ea97b37..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/build_ext.py +++ /dev/null @@ -1,331 +0,0 @@ -import os -import sys -import itertools -import imp -from distutils.command.build_ext import build_ext as _du_build_ext -from distutils.file_util import copy_file -from distutils.ccompiler import new_compiler -from distutils.sysconfig import customize_compiler, get_config_var -from distutils.errors import DistutilsError -from distutils import log - -from setuptools.extension import Library -from setuptools.extern import six - -try: - # Attempt to use Cython for building extensions, if available - from Cython.Distutils.build_ext import build_ext as _build_ext - # Additionally, assert that the compiler module will load - # also. Ref #1229. - __import__('Cython.Compiler.Main') -except ImportError: - _build_ext = _du_build_ext - -# make sure _config_vars is initialized -get_config_var("LDSHARED") -from distutils.sysconfig import _config_vars as _CONFIG_VARS - - -def _customize_compiler_for_shlib(compiler): - if sys.platform == "darwin": - # building .dylib requires additional compiler flags on OSX; here we - # temporarily substitute the pyconfig.h variables so that distutils' - # 'customize_compiler' uses them before we build the shared libraries. 
- tmp = _CONFIG_VARS.copy() - try: - # XXX Help! I don't have any idea whether these are right... - _CONFIG_VARS['LDSHARED'] = ( - "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup") - _CONFIG_VARS['CCSHARED'] = " -dynamiclib" - _CONFIG_VARS['SO'] = ".dylib" - customize_compiler(compiler) - finally: - _CONFIG_VARS.clear() - _CONFIG_VARS.update(tmp) - else: - customize_compiler(compiler) - - -have_rtld = False -use_stubs = False -libtype = 'shared' - -if sys.platform == "darwin": - use_stubs = True -elif os.name != 'nt': - try: - import dl - use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW') - except ImportError: - pass - -if_dl = lambda s: s if have_rtld else '' - - -def get_abi3_suffix(): - """Return the file extension for an abi3-compliant Extension()""" - for suffix, _, _ in (s for s in imp.get_suffixes() if s[2] == imp.C_EXTENSION): - if '.abi3' in suffix: # Unix - return suffix - elif suffix == '.pyd': # Windows - return suffix - - -class build_ext(_build_ext): - def run(self): - """Build extensions in build directory, then copy if --inplace""" - old_inplace, self.inplace = self.inplace, 0 - _build_ext.run(self) - self.inplace = old_inplace - if old_inplace: - self.copy_extensions_to_source() - - def copy_extensions_to_source(self): - build_py = self.get_finalized_command('build_py') - for ext in self.extensions: - fullname = self.get_ext_fullname(ext.name) - filename = self.get_ext_filename(fullname) - modpath = fullname.split('.') - package = '.'.join(modpath[:-1]) - package_dir = build_py.get_package_dir(package) - dest_filename = os.path.join(package_dir, - os.path.basename(filename)) - src_filename = os.path.join(self.build_lib, filename) - - # Always copy, even if source is older than destination, to ensure - # that the right extensions for the current Python/platform are - # used. - copy_file( - src_filename, dest_filename, verbose=self.verbose, - dry_run=self.dry_run - ) - if ext._needs_stub: - self.write_stub(package_dir or os.curdir, ext, True) - - def get_ext_filename(self, fullname): - filename = _build_ext.get_ext_filename(self, fullname) - if fullname in self.ext_map: - ext = self.ext_map[fullname] - use_abi3 = ( - six.PY3 - and getattr(ext, 'py_limited_api') - and get_abi3_suffix() - ) - if use_abi3: - so_ext = _get_config_var_837('EXT_SUFFIX') - filename = filename[:-len(so_ext)] - filename = filename + get_abi3_suffix() - if isinstance(ext, Library): - fn, ext = os.path.splitext(filename) - return self.shlib_compiler.library_filename(fn, libtype) - elif use_stubs and ext._links_to_dynamic: - d, fn = os.path.split(filename) - return os.path.join(d, 'dl-' + fn) - return filename - - def initialize_options(self): - _build_ext.initialize_options(self) - self.shlib_compiler = None - self.shlibs = [] - self.ext_map = {} - - def finalize_options(self): - _build_ext.finalize_options(self) - self.extensions = self.extensions or [] - self.check_extensions_list(self.extensions) - self.shlibs = [ext for ext in self.extensions - if isinstance(ext, Library)] - if self.shlibs: - self.setup_shlib_compiler() - for ext in self.extensions: - ext._full_name = self.get_ext_fullname(ext.name) - for ext in self.extensions: - fullname = ext._full_name - self.ext_map[fullname] = ext - - # distutils 3.1 will also ask for module names - # XXX what to do with conflicts? 
- self.ext_map[fullname.split('.')[-1]] = ext - - ltd = self.shlibs and self.links_to_dynamic(ext) or False - ns = ltd and use_stubs and not isinstance(ext, Library) - ext._links_to_dynamic = ltd - ext._needs_stub = ns - filename = ext._file_name = self.get_ext_filename(fullname) - libdir = os.path.dirname(os.path.join(self.build_lib, filename)) - if ltd and libdir not in ext.library_dirs: - ext.library_dirs.append(libdir) - if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs: - ext.runtime_library_dirs.append(os.curdir) - - def setup_shlib_compiler(self): - compiler = self.shlib_compiler = new_compiler( - compiler=self.compiler, dry_run=self.dry_run, force=self.force - ) - _customize_compiler_for_shlib(compiler) - - if self.include_dirs is not None: - compiler.set_include_dirs(self.include_dirs) - if self.define is not None: - # 'define' option is a list of (name,value) tuples - for (name, value) in self.define: - compiler.define_macro(name, value) - if self.undef is not None: - for macro in self.undef: - compiler.undefine_macro(macro) - if self.libraries is not None: - compiler.set_libraries(self.libraries) - if self.library_dirs is not None: - compiler.set_library_dirs(self.library_dirs) - if self.rpath is not None: - compiler.set_runtime_library_dirs(self.rpath) - if self.link_objects is not None: - compiler.set_link_objects(self.link_objects) - - # hack so distutils' build_extension() builds a library instead - compiler.link_shared_object = link_shared_object.__get__(compiler) - - def get_export_symbols(self, ext): - if isinstance(ext, Library): - return ext.export_symbols - return _build_ext.get_export_symbols(self, ext) - - def build_extension(self, ext): - ext._convert_pyx_sources_to_lang() - _compiler = self.compiler - try: - if isinstance(ext, Library): - self.compiler = self.shlib_compiler - _build_ext.build_extension(self, ext) - if ext._needs_stub: - cmd = self.get_finalized_command('build_py').build_lib - self.write_stub(cmd, ext) - finally: - self.compiler = _compiler - - def links_to_dynamic(self, ext): - """Return true if 'ext' links to a dynamic lib in the same package""" - # XXX this should check to ensure the lib is actually being built - # XXX as dynamic, and not just using a locally-found version or a - # XXX static-compiled version - libnames = dict.fromkeys([lib._full_name for lib in self.shlibs]) - pkg = '.'.join(ext._full_name.split('.')[:-1] + ['']) - return any(pkg + libname in libnames for libname in ext.libraries) - - def get_outputs(self): - return _build_ext.get_outputs(self) + self.__get_stubs_outputs() - - def __get_stubs_outputs(self): - # assemble the base name for each extension that needs a stub - ns_ext_bases = ( - os.path.join(self.build_lib, *ext._full_name.split('.')) - for ext in self.extensions - if ext._needs_stub - ) - # pair each base with the extension - pairs = itertools.product(ns_ext_bases, self.__get_output_extensions()) - return list(base + fnext for base, fnext in pairs) - - def __get_output_extensions(self): - yield '.py' - yield '.pyc' - if self.get_finalized_command('build_py').optimize: - yield '.pyo' - - def write_stub(self, output_dir, ext, compile=False): - log.info("writing stub loader for %s to %s", ext._full_name, - output_dir) - stub_file = (os.path.join(output_dir, *ext._full_name.split('.')) + - '.py') - if compile and os.path.exists(stub_file): - raise DistutilsError(stub_file + " already exists! 
Please delete.") - if not self.dry_run: - f = open(stub_file, 'w') - f.write( - '\n'.join([ - "def __bootstrap__():", - " global __bootstrap__, __file__, __loader__", - " import sys, os, pkg_resources, imp" + if_dl(", dl"), - " __file__ = pkg_resources.resource_filename" - "(__name__,%r)" - % os.path.basename(ext._file_name), - " del __bootstrap__", - " if '__loader__' in globals():", - " del __loader__", - if_dl(" old_flags = sys.getdlopenflags()"), - " old_dir = os.getcwd()", - " try:", - " os.chdir(os.path.dirname(__file__))", - if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"), - " imp.load_dynamic(__name__,__file__)", - " finally:", - if_dl(" sys.setdlopenflags(old_flags)"), - " os.chdir(old_dir)", - "__bootstrap__()", - "" # terminal \n - ]) - ) - f.close() - if compile: - from distutils.util import byte_compile - - byte_compile([stub_file], optimize=0, - force=True, dry_run=self.dry_run) - optimize = self.get_finalized_command('install_lib').optimize - if optimize > 0: - byte_compile([stub_file], optimize=optimize, - force=True, dry_run=self.dry_run) - if os.path.exists(stub_file) and not self.dry_run: - os.unlink(stub_file) - - -if use_stubs or os.name == 'nt': - # Build shared libraries - # - def link_shared_object( - self, objects, output_libname, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, export_symbols=None, - debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, - target_lang=None): - self.link( - self.SHARED_LIBRARY, objects, output_libname, - output_dir, libraries, library_dirs, runtime_library_dirs, - export_symbols, debug, extra_preargs, extra_postargs, - build_temp, target_lang - ) -else: - # Build static libraries everywhere else - libtype = 'static' - - def link_shared_object( - self, objects, output_libname, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, export_symbols=None, - debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, - target_lang=None): - # XXX we need to either disallow these attrs on Library instances, - # or warn/abort here if set, or something... - # libraries=None, library_dirs=None, runtime_library_dirs=None, - # export_symbols=None, extra_preargs=None, extra_postargs=None, - # build_temp=None - - assert output_dir is None # distutils build_ext doesn't pass this - output_dir, filename = os.path.split(output_libname) - basename, ext = os.path.splitext(filename) - if self.library_filename("x").startswith('lib'): - # strip 'lib' prefix; this is kludgy if some platform uses - # a different prefix - basename = basename[3:] - - self.create_static_lib( - objects, basename, output_dir, debug, target_lang - ) - - -def _get_config_var_837(name): - """ - In https://github.com/pypa/setuptools/pull/837, we discovered - Python 3.3.0 exposes the extension suffix under the name 'SO'. 
- """ - if sys.version_info < (3, 3, 1): - name = 'SO' - return get_config_var(name) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/build_py.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/build_py.py deleted file mode 100644 index b0314fd..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/build_py.py +++ /dev/null @@ -1,270 +0,0 @@ -from glob import glob -from distutils.util import convert_path -import distutils.command.build_py as orig -import os -import fnmatch -import textwrap -import io -import distutils.errors -import itertools - -from setuptools.extern import six -from setuptools.extern.six.moves import map, filter, filterfalse - -try: - from setuptools.lib2to3_ex import Mixin2to3 -except ImportError: - - class Mixin2to3: - def run_2to3(self, files, doctests=True): - "do nothing" - - -class build_py(orig.build_py, Mixin2to3): - """Enhanced 'build_py' command that includes data files with packages - - The data files are specified via a 'package_data' argument to 'setup()'. - See 'setuptools.dist.Distribution' for more details. - - Also, this version of the 'build_py' command allows you to specify both - 'py_modules' and 'packages' in the same setup operation. - """ - - def finalize_options(self): - orig.build_py.finalize_options(self) - self.package_data = self.distribution.package_data - self.exclude_package_data = (self.distribution.exclude_package_data or - {}) - if 'data_files' in self.__dict__: - del self.__dict__['data_files'] - self.__updated_files = [] - self.__doctests_2to3 = [] - - def run(self): - """Build modules, packages, and copy data files to build directory""" - if not self.py_modules and not self.packages: - return - - if self.py_modules: - self.build_modules() - - if self.packages: - self.build_packages() - self.build_package_data() - - self.run_2to3(self.__updated_files, False) - self.run_2to3(self.__updated_files, True) - self.run_2to3(self.__doctests_2to3, True) - - # Only compile actual .py files, using our base class' idea of what our - # output files are. 
- self.byte_compile(orig.build_py.get_outputs(self, include_bytecode=0)) - - def __getattr__(self, attr): - "lazily compute data files" - if attr == 'data_files': - self.data_files = self._get_data_files() - return self.data_files - return orig.build_py.__getattr__(self, attr) - - def build_module(self, module, module_file, package): - if six.PY2 and isinstance(package, six.string_types): - # avoid errors on Python 2 when unicode is passed (#190) - package = package.split('.') - outfile, copied = orig.build_py.build_module(self, module, module_file, - package) - if copied: - self.__updated_files.append(outfile) - return outfile, copied - - def _get_data_files(self): - """Generate list of '(package,src_dir,build_dir,filenames)' tuples""" - self.analyze_manifest() - return list(map(self._get_pkg_data_files, self.packages or ())) - - def _get_pkg_data_files(self, package): - # Locate package source directory - src_dir = self.get_package_dir(package) - - # Compute package build directory - build_dir = os.path.join(*([self.build_lib] + package.split('.'))) - - # Strip directory from globbed filenames - filenames = [ - os.path.relpath(file, src_dir) - for file in self.find_data_files(package, src_dir) - ] - return package, src_dir, build_dir, filenames - - def find_data_files(self, package, src_dir): - """Return filenames for package's data files in 'src_dir'""" - patterns = self._get_platform_patterns( - self.package_data, - package, - src_dir, - ) - globs_expanded = map(glob, patterns) - # flatten the expanded globs into an iterable of matches - globs_matches = itertools.chain.from_iterable(globs_expanded) - glob_files = filter(os.path.isfile, globs_matches) - files = itertools.chain( - self.manifest_files.get(package, []), - glob_files, - ) - return self.exclude_data_files(package, src_dir, files) - - def build_package_data(self): - """Copy data files into build directory""" - for package, src_dir, build_dir, filenames in self.data_files: - for filename in filenames: - target = os.path.join(build_dir, filename) - self.mkpath(os.path.dirname(target)) - srcfile = os.path.join(src_dir, filename) - outf, copied = self.copy_file(srcfile, target) - srcfile = os.path.abspath(srcfile) - if (copied and - srcfile in self.distribution.convert_2to3_doctests): - self.__doctests_2to3.append(outf) - - def analyze_manifest(self): - self.manifest_files = mf = {} - if not self.distribution.include_package_data: - return - src_dirs = {} - for package in self.packages or (): - # Locate package source directory - src_dirs[assert_relative(self.get_package_dir(package))] = package - - self.run_command('egg_info') - ei_cmd = self.get_finalized_command('egg_info') - for path in ei_cmd.filelist.files: - d, f = os.path.split(assert_relative(path)) - prev = None - oldf = f - while d and d != prev and d not in src_dirs: - prev = d - d, df = os.path.split(d) - f = os.path.join(df, f) - if d in src_dirs: - if path.endswith('.py') and f == oldf: - continue # it's a module, not data - mf.setdefault(src_dirs[d], []).append(path) - - def get_data_files(self): - pass # Lazily compute data files in _get_data_files() function. 
- - def check_package(self, package, package_dir): - """Check namespace packages' __init__ for declare_namespace""" - try: - return self.packages_checked[package] - except KeyError: - pass - - init_py = orig.build_py.check_package(self, package, package_dir) - self.packages_checked[package] = init_py - - if not init_py or not self.distribution.namespace_packages: - return init_py - - for pkg in self.distribution.namespace_packages: - if pkg == package or pkg.startswith(package + '.'): - break - else: - return init_py - - with io.open(init_py, 'rb') as f: - contents = f.read() - if b'declare_namespace' not in contents: - raise distutils.errors.DistutilsError( - "Namespace package problem: %s is a namespace package, but " - "its\n__init__.py does not call declare_namespace()! Please " - 'fix it.\n(See the setuptools manual under ' - '"Namespace Packages" for details.)\n"' % (package,) - ) - return init_py - - def initialize_options(self): - self.packages_checked = {} - orig.build_py.initialize_options(self) - - def get_package_dir(self, package): - res = orig.build_py.get_package_dir(self, package) - if self.distribution.src_root is not None: - return os.path.join(self.distribution.src_root, res) - return res - - def exclude_data_files(self, package, src_dir, files): - """Filter filenames for package's data files in 'src_dir'""" - files = list(files) - patterns = self._get_platform_patterns( - self.exclude_package_data, - package, - src_dir, - ) - match_groups = ( - fnmatch.filter(files, pattern) - for pattern in patterns - ) - # flatten the groups of matches into an iterable of matches - matches = itertools.chain.from_iterable(match_groups) - bad = set(matches) - keepers = ( - fn - for fn in files - if fn not in bad - ) - # ditch dupes - return list(_unique_everseen(keepers)) - - @staticmethod - def _get_platform_patterns(spec, package, src_dir): - """ - yield platform-specific path patterns (suitable for glob - or fn_match) from a glob-based spec (such as - self.package_data or self.exclude_package_data) - matching package in src_dir. - """ - raw_patterns = itertools.chain( - spec.get('', []), - spec.get(package, []), - ) - return ( - # Each pattern has to be converted to a platform-specific path - os.path.join(src_dir, convert_path(pattern)) - for pattern in raw_patterns - ) - - -# from Python docs -def _unique_everseen(iterable, key=None): - "List unique elements, preserving order. Remember all elements ever seen." - # unique_everseen('AAAABBBCCDAABBB') --> A B C D - # unique_everseen('ABBCcAD', str.lower) --> A B C D - seen = set() - seen_add = seen.add - if key is None: - for element in filterfalse(seen.__contains__, iterable): - seen_add(element) - yield element - else: - for element in iterable: - k = key(element) - if k not in seen: - seen_add(k) - yield element - - -def assert_relative(path): - if not os.path.isabs(path): - return path - from distutils.errors import DistutilsSetupError - - msg = textwrap.dedent(""" - Error: setup script specifies an absolute path: - - %s - - setup() arguments must *always* be /-separated paths relative to the - setup.py directory, *never* absolute paths. 
- """).lstrip() % path - raise DistutilsSetupError(msg) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/develop.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/develop.py deleted file mode 100644 index 959c932..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/develop.py +++ /dev/null @@ -1,216 +0,0 @@ -from distutils.util import convert_path -from distutils import log -from distutils.errors import DistutilsError, DistutilsOptionError -import os -import glob -import io - -from setuptools.extern import six - -from pkg_resources import Distribution, PathMetadata, normalize_path -from setuptools.command.easy_install import easy_install -from setuptools import namespaces -import setuptools - - -class develop(namespaces.DevelopInstaller, easy_install): - """Set up package for development""" - - description = "install package in 'development mode'" - - user_options = easy_install.user_options + [ - ("uninstall", "u", "Uninstall this source package"), - ("egg-path=", None, "Set the path to be used in the .egg-link file"), - ] - - boolean_options = easy_install.boolean_options + ['uninstall'] - - command_consumes_arguments = False # override base - - def run(self): - if self.uninstall: - self.multi_version = True - self.uninstall_link() - self.uninstall_namespaces() - else: - self.install_for_development() - self.warn_deprecated_options() - - def initialize_options(self): - self.uninstall = None - self.egg_path = None - easy_install.initialize_options(self) - self.setup_path = None - self.always_copy_from = '.' # always copy eggs installed in curdir - - def finalize_options(self): - ei = self.get_finalized_command("egg_info") - if ei.broken_egg_info: - template = "Please rename %r to %r before using 'develop'" - args = ei.egg_info, ei.broken_egg_info - raise DistutilsError(template % args) - self.args = [ei.egg_name] - - easy_install.finalize_options(self) - self.expand_basedirs() - self.expand_dirs() - # pick up setup-dir .egg files only: no .egg-info - self.package_index.scan(glob.glob('*.egg')) - - egg_link_fn = ei.egg_name + '.egg-link' - self.egg_link = os.path.join(self.install_dir, egg_link_fn) - self.egg_base = ei.egg_base - if self.egg_path is None: - self.egg_path = os.path.abspath(ei.egg_base) - - target = normalize_path(self.egg_base) - egg_path = normalize_path(os.path.join(self.install_dir, - self.egg_path)) - if egg_path != target: - raise DistutilsOptionError( - "--egg-path must be a relative path from the install" - " directory to " + target - ) - - # Make a distribution for the package's source - self.dist = Distribution( - target, - PathMetadata(target, os.path.abspath(ei.egg_info)), - project_name=ei.egg_name - ) - - self.setup_path = self._resolve_setup_path( - self.egg_base, - self.install_dir, - self.egg_path, - ) - - @staticmethod - def _resolve_setup_path(egg_base, install_dir, egg_path): - """ - Generate a path from egg_base back to '.' where the - setup script resides and ensure that path points to the - setup path from $install_dir/$egg_path. 
- """ - path_to_setup = egg_base.replace(os.sep, '/').rstrip('/') - if path_to_setup != os.curdir: - path_to_setup = '../' * (path_to_setup.count('/') + 1) - resolved = normalize_path( - os.path.join(install_dir, egg_path, path_to_setup) - ) - if resolved != normalize_path(os.curdir): - raise DistutilsOptionError( - "Can't get a consistent path to setup script from" - " installation directory", resolved, normalize_path(os.curdir)) - return path_to_setup - - def install_for_development(self): - if six.PY3 and getattr(self.distribution, 'use_2to3', False): - # If we run 2to3 we can not do this inplace: - - # Ensure metadata is up-to-date - self.reinitialize_command('build_py', inplace=0) - self.run_command('build_py') - bpy_cmd = self.get_finalized_command("build_py") - build_path = normalize_path(bpy_cmd.build_lib) - - # Build extensions - self.reinitialize_command('egg_info', egg_base=build_path) - self.run_command('egg_info') - - self.reinitialize_command('build_ext', inplace=0) - self.run_command('build_ext') - - # Fixup egg-link and easy-install.pth - ei_cmd = self.get_finalized_command("egg_info") - self.egg_path = build_path - self.dist.location = build_path - # XXX - self.dist._provider = PathMetadata(build_path, ei_cmd.egg_info) - else: - # Without 2to3 inplace works fine: - self.run_command('egg_info') - - # Build extensions in-place - self.reinitialize_command('build_ext', inplace=1) - self.run_command('build_ext') - - self.install_site_py() # ensure that target dir is site-safe - if setuptools.bootstrap_install_from: - self.easy_install(setuptools.bootstrap_install_from) - setuptools.bootstrap_install_from = None - - self.install_namespaces() - - # create an .egg-link in the installation dir, pointing to our egg - log.info("Creating %s (link to %s)", self.egg_link, self.egg_base) - if not self.dry_run: - with open(self.egg_link, "w") as f: - f.write(self.egg_path + "\n" + self.setup_path) - # postprocess the installed distro, fixing up .pth, installing scripts, - # and handling requirements - self.process_distribution(None, self.dist, not self.no_deps) - - def uninstall_link(self): - if os.path.exists(self.egg_link): - log.info("Removing %s (link to %s)", self.egg_link, self.egg_base) - egg_link_file = open(self.egg_link) - contents = [line.rstrip() for line in egg_link_file] - egg_link_file.close() - if contents not in ([self.egg_path], - [self.egg_path, self.setup_path]): - log.warn("Link points to %s: uninstall aborted", contents) - return - if not self.dry_run: - os.unlink(self.egg_link) - if not self.dry_run: - self.update_pth(self.dist) # remove any .pth link to us - if self.distribution.scripts: - # XXX should also check for entry point scripts! - log.warn("Note: you must uninstall or replace scripts manually!") - - def install_egg_scripts(self, dist): - if dist is not self.dist: - # Installing a dependency, so fall back to normal behavior - return easy_install.install_egg_scripts(self, dist) - - # create wrapper scripts in the script dir, pointing to dist.scripts - - # new-style... 
- self.install_wrapper_scripts(dist) - - # ...and old-style - for script_name in self.distribution.scripts or []: - script_path = os.path.abspath(convert_path(script_name)) - script_name = os.path.basename(script_path) - with io.open(script_path) as strm: - script_text = strm.read() - self.install_script(dist, script_name, script_text, script_path) - - def install_wrapper_scripts(self, dist): - dist = VersionlessRequirement(dist) - return easy_install.install_wrapper_scripts(self, dist) - - -class VersionlessRequirement(object): - """ - Adapt a pkg_resources.Distribution to simply return the project - name as the 'requirement' so that scripts will work across - multiple versions. - - >>> dist = Distribution(project_name='foo', version='1.0') - >>> str(dist.as_requirement()) - 'foo==1.0' - >>> adapted_dist = VersionlessRequirement(dist) - >>> str(adapted_dist.as_requirement()) - 'foo' - """ - - def __init__(self, dist): - self.__dist = dist - - def __getattr__(self, name): - return getattr(self.__dist, name) - - def as_requirement(self): - return self.project_name diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/dist_info.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/dist_info.py deleted file mode 100644 index c45258f..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/dist_info.py +++ /dev/null @@ -1,36 +0,0 @@ -""" -Create a dist_info directory -As defined in the wheel specification -""" - -import os - -from distutils.core import Command -from distutils import log - - -class dist_info(Command): - - description = 'create a .dist-info directory' - - user_options = [ - ('egg-base=', 'e', "directory containing .egg-info directories" - " (default: top of the source tree)"), - ] - - def initialize_options(self): - self.egg_base = None - - def finalize_options(self): - pass - - def run(self): - egg_info = self.get_finalized_command('egg_info') - egg_info.egg_base = self.egg_base - egg_info.finalize_options() - egg_info.run() - dist_info_dir = egg_info.egg_info[:-len('.egg-info')] + '.dist-info' - log.info("creating '{}'".format(os.path.abspath(dist_info_dir))) - - bdist_wheel = self.get_finalized_command('bdist_wheel') - bdist_wheel.egg2dist(egg_info.egg_info, dist_info_dir) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/easy_install.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/easy_install.py deleted file mode 100644 index c9fa55d..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/easy_install.py +++ /dev/null @@ -1,2389 +0,0 @@ -#!/usr/bin/env python -""" -Easy Install ------------- - -A tool for doing automatic download/extract/build of distutils-based Python -packages. For detailed documentation, see the accompanying EasyInstall.txt -file, or visit the `EasyInstall home page`__. 
- -__ https://setuptools.readthedocs.io/en/latest/easy_install.html - -""" - -from glob import glob -from distutils.util import get_platform -from distutils.util import convert_path, subst_vars -from distutils.errors import ( - DistutilsArgError, DistutilsOptionError, - DistutilsError, DistutilsPlatformError, -) -from distutils.command.install import INSTALL_SCHEMES, SCHEME_KEYS -from distutils import log, dir_util -from distutils.command.build_scripts import first_line_re -from distutils.spawn import find_executable -import sys -import os -import zipimport -import shutil -import tempfile -import zipfile -import re -import stat -import random -import textwrap -import warnings -import site -import struct -import contextlib -import subprocess -import shlex -import io - -from setuptools.extern import six -from setuptools.extern.six.moves import configparser, map - -from setuptools import Command -from setuptools.sandbox import run_setup -from setuptools.py31compat import get_path, get_config_vars -from setuptools.py27compat import rmtree_safe -from setuptools.command import setopt -from setuptools.archive_util import unpack_archive -from setuptools.package_index import ( - PackageIndex, parse_requirement_arg, URL_SCHEME, -) -from setuptools.command import bdist_egg, egg_info -from setuptools.wheel import Wheel -from pkg_resources import ( - yield_lines, normalize_path, resource_string, ensure_directory, - get_distribution, find_distributions, Environment, Requirement, - Distribution, PathMetadata, EggMetadata, WorkingSet, DistributionNotFound, - VersionConflict, DEVELOP_DIST, -) -import pkg_resources.py31compat - -# Turn on PEP440Warnings -warnings.filterwarnings("default", category=pkg_resources.PEP440Warning) - -__all__ = [ - 'samefile', 'easy_install', 'PthDistributions', 'extract_wininst_cfg', - 'main', 'get_exe_prefixes', -] - - -def is_64bit(): - return struct.calcsize("P") == 8 - - -def samefile(p1, p2): - """ - Determine if two paths reference the same file. - - Augments os.path.samefile to work on Windows and - suppresses errors if the path doesn't exist. 
- """ - both_exist = os.path.exists(p1) and os.path.exists(p2) - use_samefile = hasattr(os.path, 'samefile') and both_exist - if use_samefile: - return os.path.samefile(p1, p2) - norm_p1 = os.path.normpath(os.path.normcase(p1)) - norm_p2 = os.path.normpath(os.path.normcase(p2)) - return norm_p1 == norm_p2 - - -if six.PY2: - - def _to_ascii(s): - return s - - def isascii(s): - try: - six.text_type(s, 'ascii') - return True - except UnicodeError: - return False -else: - - def _to_ascii(s): - return s.encode('ascii') - - def isascii(s): - try: - s.encode('ascii') - return True - except UnicodeError: - return False - - -_one_liner = lambda text: textwrap.dedent(text).strip().replace('\n', '; ') - - -class easy_install(Command): - """Manage a download/build/install process""" - description = "Find/get/install Python packages" - command_consumes_arguments = True - - user_options = [ - ('prefix=', None, "installation prefix"), - ("zip-ok", "z", "install package as a zipfile"), - ("multi-version", "m", "make apps have to require() a version"), - ("upgrade", "U", "force upgrade (searches PyPI for latest versions)"), - ("install-dir=", "d", "install package to DIR"), - ("script-dir=", "s", "install scripts to DIR"), - ("exclude-scripts", "x", "Don't install scripts"), - ("always-copy", "a", "Copy all needed packages to install dir"), - ("index-url=", "i", "base URL of Python Package Index"), - ("find-links=", "f", "additional URL(s) to search for packages"), - ("build-directory=", "b", - "download/extract/build in DIR; keep the results"), - ('optimize=', 'O', - "also compile with optimization: -O1 for \"python -O\", " - "-O2 for \"python -OO\", and -O0 to disable [default: -O0]"), - ('record=', None, - "filename in which to record list of installed files"), - ('always-unzip', 'Z', "don't install as a zipfile, no matter what"), - ('site-dirs=', 'S', "list of directories where .pth files work"), - ('editable', 'e', "Install specified packages in editable form"), - ('no-deps', 'N', "don't install dependencies"), - ('allow-hosts=', 'H', "pattern(s) that hostnames must match"), - ('local-snapshots-ok', 'l', - "allow building eggs from local checkouts"), - ('version', None, "print version information and exit"), - ('install-layout=', None, "installation layout to choose (known values: deb)"), - ('force-installation-into-system-dir', '0', "force installation into /usr"), - ('no-find-links', None, - "Don't load find-links defined in packages being installed") - ] - boolean_options = [ - 'zip-ok', 'multi-version', 'exclude-scripts', 'upgrade', 'always-copy', - 'editable', - 'no-deps', 'local-snapshots-ok', 'version', 'force-installation-into-system-dir' - ] - - if site.ENABLE_USER_SITE: - help_msg = "install in user site-package '%s'" % site.USER_SITE - user_options.append(('user', None, help_msg)) - boolean_options.append('user') - - negative_opt = {'always-unzip': 'zip-ok'} - create_index = PackageIndex - - def initialize_options(self): - # the --user option seems to be an opt-in one, - # so the default should be False. 
- self.user = 0 - self.zip_ok = self.local_snapshots_ok = None - self.install_dir = self.script_dir = self.exclude_scripts = None - self.index_url = None - self.find_links = None - self.build_directory = None - self.args = None - self.optimize = self.record = None - self.upgrade = self.always_copy = self.multi_version = None - self.editable = self.no_deps = self.allow_hosts = None - self.root = self.prefix = self.no_report = None - self.version = None - self.install_purelib = None # for pure module distributions - self.install_platlib = None # non-pure (dists w/ extensions) - self.install_headers = None # for C/C++ headers - self.install_lib = None # set to either purelib or platlib - self.install_scripts = None - self.install_data = None - self.install_base = None - self.install_platbase = None - if site.ENABLE_USER_SITE: - self.install_userbase = site.USER_BASE - self.install_usersite = site.USER_SITE - else: - self.install_userbase = None - self.install_usersite = None - self.no_find_links = None - - # Options not specifiable via command line - self.package_index = None - self.pth_file = self.always_copy_from = None - self.site_dirs = None - self.installed_projects = {} - self.sitepy_installed = False - # enable custom installation, known values: deb - self.install_layout = None - self.force_installation_into_system_dir = None - self.multiarch = None - - # Always read easy_install options, even if we are subclassed, or have - # an independent instance created. This ensures that defaults will - # always come from the standard configuration file(s)' "easy_install" - # section, even if this is a "develop" or "install" command, or some - # other embedding. - self._dry_run = None - self.verbose = self.distribution.verbose - self.distribution._set_command_options( - self, self.distribution.get_option_dict('easy_install') - ) - - def delete_blockers(self, blockers): - extant_blockers = ( - filename for filename in blockers - if os.path.exists(filename) or os.path.islink(filename) - ) - list(map(self._delete_path, extant_blockers)) - - def _delete_path(self, path): - log.info("Deleting %s", path) - if self.dry_run: - return - - is_tree = os.path.isdir(path) and not os.path.islink(path) - remover = rmtree if is_tree else os.unlink - remover(path) - - @staticmethod - def _render_version(): - """ - Render the Setuptools version and installation details, then exit. 
- """ - ver = sys.version[:3] - dist = get_distribution('setuptools') - tmpl = 'setuptools {dist.version} from {dist.location} (Python {ver})' - print(tmpl.format(**locals())) - raise SystemExit() - - def finalize_options(self): - self.version and self._render_version() - - py_version = sys.version.split()[0] - prefix, exec_prefix = get_config_vars('prefix', 'exec_prefix') - - self.config_vars = { - 'dist_name': self.distribution.get_name(), - 'dist_version': self.distribution.get_version(), - 'dist_fullname': self.distribution.get_fullname(), - 'py_version': py_version, - 'py_version_short': py_version[0:3], - 'py_version_nodot': py_version[0] + py_version[2], - 'sys_prefix': prefix, - 'prefix': prefix, - 'sys_exec_prefix': exec_prefix, - 'exec_prefix': exec_prefix, - # Only python 3.2+ has abiflags - 'abiflags': getattr(sys, 'abiflags', ''), - } - - if site.ENABLE_USER_SITE: - self.config_vars['userbase'] = self.install_userbase - self.config_vars['usersite'] = self.install_usersite - - self._fix_install_dir_for_user_site() - - self.expand_basedirs() - self.expand_dirs() - - if self.install_layout: - if not self.install_layout.lower() in ['deb']: - raise DistutilsOptionError("unknown value for --install-layout") - self.install_layout = self.install_layout.lower() - - import sysconfig - if sys.version_info[:2] >= (3, 3): - self.multiarch = sysconfig.get_config_var('MULTIARCH') - - self._expand( - 'install_dir', 'script_dir', 'build_directory', - 'site_dirs', - ) - # If a non-default installation directory was specified, default the - # script directory to match it. - if self.script_dir is None: - self.script_dir = self.install_dir - - if self.no_find_links is None: - self.no_find_links = False - - # Let install_dir get set by install_lib command, which in turn - # gets its info from the install command, and takes into account - # --prefix and --home and all that other crud. - self.set_undefined_options( - 'install_lib', ('install_dir', 'install_dir') - ) - # Likewise, set default script_dir from 'install_scripts.install_dir' - self.set_undefined_options( - 'install_scripts', ('install_dir', 'script_dir') - ) - - if self.user and self.install_purelib: - self.install_dir = self.install_purelib - self.script_dir = self.install_scripts - - if self.prefix == '/usr' and not self.force_installation_into_system_dir: - raise DistutilsOptionError("""installation into /usr - -Trying to install into the system managed parts of the file system. Please -consider to install to another location, or use the option ---force-installation-into-system-dir to overwrite this warning. -""") - - # default --record from the install command - self.set_undefined_options('install', ('record', 'record')) - # Should this be moved to the if statement below? 
It's not used - # elsewhere - normpath = map(normalize_path, sys.path) - self.all_site_dirs = get_site_dirs() - if self.site_dirs is not None: - site_dirs = [ - os.path.expanduser(s.strip()) for s in - self.site_dirs.split(',') - ] - for d in site_dirs: - if not os.path.isdir(d): - log.warn("%s (in --site-dirs) does not exist", d) - elif normalize_path(d) not in normpath: - raise DistutilsOptionError( - d + " (in --site-dirs) is not on sys.path" - ) - else: - self.all_site_dirs.append(normalize_path(d)) - if not self.editable: - self.check_site_dir() - self.index_url = self.index_url or "https://pypi.python.org/simple" - self.shadow_path = self.all_site_dirs[:] - for path_item in self.install_dir, normalize_path(self.script_dir): - if path_item not in self.shadow_path: - self.shadow_path.insert(0, path_item) - - if self.allow_hosts is not None: - hosts = [s.strip() for s in self.allow_hosts.split(',')] - else: - hosts = ['*'] - if self.package_index is None: - self.package_index = self.create_index( - self.index_url, search_path=self.shadow_path, hosts=hosts, - ) - self.local_index = Environment(self.shadow_path + sys.path) - - if self.find_links is not None: - if isinstance(self.find_links, six.string_types): - self.find_links = self.find_links.split() - else: - self.find_links = [] - if self.local_snapshots_ok: - self.package_index.scan_egg_links(self.shadow_path + sys.path) - if not self.no_find_links: - self.package_index.add_find_links(self.find_links) - self.set_undefined_options('install_lib', ('optimize', 'optimize')) - if not isinstance(self.optimize, int): - try: - self.optimize = int(self.optimize) - if not (0 <= self.optimize <= 2): - raise ValueError - except ValueError: - raise DistutilsOptionError("--optimize must be 0, 1, or 2") - - if self.editable and not self.build_directory: - raise DistutilsArgError( - "Must specify a build directory (-b) when using --editable" - ) - if not self.args: - raise DistutilsArgError( - "No urls, filenames, or requirements specified (see --help)") - - self.outputs = [] - - def _fix_install_dir_for_user_site(self): - """ - Fix the install_dir if "--user" was used. 
- """ - if not self.user or not site.ENABLE_USER_SITE: - return - - self.create_home_path() - if self.install_userbase is None: - msg = "User base directory is not specified" - raise DistutilsPlatformError(msg) - self.install_base = self.install_platbase = self.install_userbase - scheme_name = os.name.replace('posix', 'unix') + '_user' - self.select_scheme(scheme_name) - - def _expand_attrs(self, attrs): - for attr in attrs: - val = getattr(self, attr) - if val is not None: - if os.name == 'posix' or os.name == 'nt': - val = os.path.expanduser(val) - val = subst_vars(val, self.config_vars) - setattr(self, attr, val) - - def expand_basedirs(self): - """Calls `os.path.expanduser` on install_base, install_platbase and - root.""" - self._expand_attrs(['install_base', 'install_platbase', 'root']) - - def expand_dirs(self): - """Calls `os.path.expanduser` on install dirs.""" - dirs = [ - 'install_purelib', - 'install_platlib', - 'install_lib', - 'install_headers', - 'install_scripts', - 'install_data', - ] - self._expand_attrs(dirs) - - def run(self): - if self.verbose != self.distribution.verbose: - log.set_verbosity(self.verbose) - try: - for spec in self.args: - self.easy_install(spec, not self.no_deps) - if self.record: - outputs = list(sorted(self.outputs)) - if self.root: # strip any package prefix - root_len = len(self.root) - for counter in range(len(outputs)): - outputs[counter] = outputs[counter][root_len:] - from distutils import file_util - - self.execute( - file_util.write_file, (self.record, outputs), - "writing list of installed files to '%s'" % - self.record - ) - self.warn_deprecated_options() - finally: - log.set_verbosity(self.distribution.verbose) - - def pseudo_tempname(self): - """Return a pseudo-tempname base in the install directory. - This code is intentionally naive; if a malicious party can write to - the target directory you're already in deep doodoo. - """ - try: - pid = os.getpid() - except Exception: - pid = random.randint(0, sys.maxsize) - return os.path.join(self.install_dir, "test-easy-install-%s" % pid) - - def warn_deprecated_options(self): - pass - - def check_site_dir(self): - """Verify that self.install_dir is .pth-capable dir, if needed""" - - instdir = normalize_path(self.install_dir) - pth_file = os.path.join(instdir, 'easy-install.pth') - - # Is it a configured, PYTHONPATH, implicit, or explicit site dir? - is_site_dir = instdir in self.all_site_dirs - - if not is_site_dir and not self.multi_version: - # No? 
Then directly test whether it does .pth file processing - is_site_dir = self.check_pth_processing() - else: - # make sure we can write to target dir - testfile = self.pseudo_tempname() + '.write-test' - test_exists = os.path.exists(testfile) - try: - if test_exists: - os.unlink(testfile) - open(testfile, 'w').close() - os.unlink(testfile) - except (OSError, IOError): - self.cant_write_to_target() - - if not is_site_dir and not self.multi_version: - # Can't install non-multi to non-site dir - raise DistutilsError(self.no_default_version_msg()) - - if is_site_dir: - if self.pth_file is None: - self.pth_file = PthDistributions(pth_file, self.all_site_dirs) - else: - self.pth_file = None - - if instdir not in map(normalize_path, _pythonpath()): - # only PYTHONPATH dirs need a site.py, so pretend it's there - self.sitepy_installed = True - elif self.multi_version and not os.path.exists(pth_file): - self.sitepy_installed = True # don't need site.py in this case - self.pth_file = None # and don't create a .pth file - self.install_dir = instdir - - __cant_write_msg = textwrap.dedent(""" - can't create or remove files in install directory - - The following error occurred while trying to add or remove files in the - installation directory: - - %s - - The installation directory you specified (via --install-dir, --prefix, or - the distutils default setting) was: - - %s - """).lstrip() - - __not_exists_id = textwrap.dedent(""" - This directory does not currently exist. Please create it and try again, or - choose a different installation directory (using the -d or --install-dir - option). - """).lstrip() - - __access_msg = textwrap.dedent(""" - Perhaps your account does not have write access to this directory? If the - installation directory is a system-owned directory, you may need to sign in - as the administrator or "root" account. If you do not have administrative - access to this machine, you may wish to choose a different installation - directory, preferably one that is listed in your PYTHONPATH environment - variable. - - For information on other options, you may wish to consult the - documentation at: - - https://setuptools.readthedocs.io/en/latest/easy_install.html - - Please make the appropriate changes for your system and try again. - """).lstrip() - - def cant_write_to_target(self): - msg = self.__cant_write_msg % (sys.exc_info()[1], self.install_dir,) - - if not os.path.exists(self.install_dir): - msg += '\n' + self.__not_exists_id - else: - msg += '\n' + self.__access_msg - raise DistutilsError(msg) - - def check_pth_processing(self): - """Empirically verify whether .pth files are supported in inst. 
dir""" - instdir = self.install_dir - log.info("Checking .pth file support in %s", instdir) - pth_file = self.pseudo_tempname() + ".pth" - ok_file = pth_file + '.ok' - ok_exists = os.path.exists(ok_file) - tmpl = _one_liner(""" - import os - f = open({ok_file!r}, 'w') - f.write('OK') - f.close() - """) + '\n' - try: - if ok_exists: - os.unlink(ok_file) - dirname = os.path.dirname(ok_file) - pkg_resources.py31compat.makedirs(dirname, exist_ok=True) - f = open(pth_file, 'w') - except (OSError, IOError): - self.cant_write_to_target() - else: - try: - f.write(tmpl.format(**locals())) - f.close() - f = None - executable = sys.executable - if os.name == 'nt': - dirname, basename = os.path.split(executable) - alt = os.path.join(dirname, 'pythonw.exe') - use_alt = ( - basename.lower() == 'python.exe' and - os.path.exists(alt) - ) - if use_alt: - # use pythonw.exe to avoid opening a console window - executable = alt - - from distutils.spawn import spawn - - spawn([executable, '-E', '-c', 'pass'], 0) - - if os.path.exists(ok_file): - log.info( - "TEST PASSED: %s appears to support .pth files", - instdir - ) - return True - finally: - if f: - f.close() - if os.path.exists(ok_file): - os.unlink(ok_file) - if os.path.exists(pth_file): - os.unlink(pth_file) - if not self.multi_version: - log.warn("TEST FAILED: %s does NOT support .pth files", instdir) - return False - - def install_egg_scripts(self, dist): - """Write all the scripts for `dist`, unless scripts are excluded""" - if not self.exclude_scripts and dist.metadata_isdir('scripts'): - for script_name in dist.metadata_listdir('scripts'): - if dist.metadata_isdir('scripts/' + script_name): - # The "script" is a directory, likely a Python 3 - # __pycache__ directory, so skip it. - continue - self.install_script( - dist, script_name, - dist.get_metadata('scripts/' + script_name) - ) - self.install_wrapper_scripts(dist) - - def add_output(self, path): - if os.path.isdir(path): - for base, dirs, files in os.walk(path): - for filename in files: - self.outputs.append(os.path.join(base, filename)) - else: - self.outputs.append(path) - - def not_editable(self, spec): - if self.editable: - raise DistutilsArgError( - "Invalid argument %r: you can't use filenames or URLs " - "with --editable (except via the --find-links option)." 
- % (spec,) - ) - - def check_editable(self, spec): - if not self.editable: - return - - if os.path.exists(os.path.join(self.build_directory, spec.key)): - raise DistutilsArgError( - "%r already exists in %s; can't do a checkout there" % - (spec.key, self.build_directory) - ) - - @contextlib.contextmanager - def _tmpdir(self): - tmpdir = tempfile.mkdtemp(prefix=six.u("easy_install-")) - try: - # cast to str as workaround for #709 and #710 and #712 - yield str(tmpdir) - finally: - os.path.exists(tmpdir) and rmtree(rmtree_safe(tmpdir)) - - def easy_install(self, spec, deps=False): - if not self.editable: - self.install_site_py() - - with self._tmpdir() as tmpdir: - if not isinstance(spec, Requirement): - if URL_SCHEME(spec): - # It's a url, download it to tmpdir and process - self.not_editable(spec) - dl = self.package_index.download(spec, tmpdir) - return self.install_item(None, dl, tmpdir, deps, True) - - elif os.path.exists(spec): - # Existing file or directory, just process it directly - self.not_editable(spec) - return self.install_item(None, spec, tmpdir, deps, True) - else: - spec = parse_requirement_arg(spec) - - self.check_editable(spec) - dist = self.package_index.fetch_distribution( - spec, tmpdir, self.upgrade, self.editable, - not self.always_copy, self.local_index - ) - if dist is None: - msg = "Could not find suitable distribution for %r" % spec - if self.always_copy: - msg += " (--always-copy skips system and development eggs)" - raise DistutilsError(msg) - elif dist.precedence == DEVELOP_DIST: - # .egg-info dists don't need installing, just process deps - self.process_distribution(spec, dist, deps, "Using") - return dist - else: - return self.install_item(spec, dist.location, tmpdir, deps) - - def install_item(self, spec, download, tmpdir, deps, install_needed=False): - - # Installation is also needed if file in tmpdir or is not an egg - install_needed = install_needed or self.always_copy - install_needed = install_needed or os.path.dirname(download) == tmpdir - install_needed = install_needed or not download.endswith('.egg') - install_needed = install_needed or ( - self.always_copy_from is not None and - os.path.dirname(normalize_path(download)) == - normalize_path(self.always_copy_from) - ) - - if spec and not install_needed: - # at this point, we know it's a local .egg, we just don't know if - # it's already installed. - for dist in self.local_index[spec.project_name]: - if dist.location == download: - break - else: - install_needed = True # it's not in the local index - - log.info("Processing %s", os.path.basename(download)) - - if install_needed: - dists = self.install_eggs(spec, download, tmpdir) - for dist in dists: - self.process_distribution(spec, dist, deps) - else: - dists = [self.egg_distribution(download)] - self.process_distribution(spec, dists[0], deps, "Using") - - if spec is not None: - for dist in dists: - if dist in spec: - return dist - - def select_scheme(self, name): - """Sets the install directories by applying the install schemes.""" - # it's the caller's problem if they supply a bad name! 
- scheme = INSTALL_SCHEMES[name] - for key in SCHEME_KEYS: - attrname = 'install_' + key - if getattr(self, attrname) is None: - setattr(self, attrname, scheme[key]) - - def process_distribution(self, requirement, dist, deps=True, *info): - self.update_pth(dist) - self.package_index.add(dist) - if dist in self.local_index[dist.key]: - self.local_index.remove(dist) - self.local_index.add(dist) - self.install_egg_scripts(dist) - self.installed_projects[dist.key] = dist - log.info(self.installation_report(requirement, dist, *info)) - if (dist.has_metadata('dependency_links.txt') and - not self.no_find_links): - self.package_index.add_find_links( - dist.get_metadata_lines('dependency_links.txt') - ) - if not deps and not self.always_copy: - return - elif requirement is not None and dist.key != requirement.key: - log.warn("Skipping dependencies for %s", dist) - return # XXX this is not the distribution we were looking for - elif requirement is None or dist not in requirement: - # if we wound up with a different version, resolve what we've got - distreq = dist.as_requirement() - requirement = Requirement(str(distreq)) - log.info("Processing dependencies for %s", requirement) - try: - distros = WorkingSet([]).resolve( - [requirement], self.local_index, self.easy_install - ) - except DistributionNotFound as e: - raise DistutilsError(str(e)) - except VersionConflict as e: - raise DistutilsError(e.report()) - if self.always_copy or self.always_copy_from: - # Force all the relevant distros to be copied or activated - for dist in distros: - if dist.key not in self.installed_projects: - self.easy_install(dist.as_requirement()) - log.info("Finished processing dependencies for %s", requirement) - - def should_unzip(self, dist): - if self.zip_ok is not None: - return not self.zip_ok - if dist.has_metadata('not-zip-safe'): - return True - if not dist.has_metadata('zip-safe'): - return True - return False - - def maybe_move(self, spec, dist_filename, setup_base): - dst = os.path.join(self.build_directory, spec.key) - if os.path.exists(dst): - msg = ( - "%r already exists in %s; build directory %s will not be kept" - ) - log.warn(msg, spec.key, self.build_directory, setup_base) - return setup_base - if os.path.isdir(dist_filename): - setup_base = dist_filename - else: - if os.path.dirname(dist_filename) == setup_base: - os.unlink(dist_filename) # get it out of the tmp dir - contents = os.listdir(setup_base) - if len(contents) == 1: - dist_filename = os.path.join(setup_base, contents[0]) - if os.path.isdir(dist_filename): - # if the only thing there is a directory, move it instead - setup_base = dist_filename - ensure_directory(dst) - shutil.move(setup_base, dst) - return dst - - def install_wrapper_scripts(self, dist): - if self.exclude_scripts: - return - for args in ScriptWriter.best().get_args(dist): - self.write_script(*args) - - def install_script(self, dist, script_name, script_text, dev_path=None): - """Generate a legacy script wrapper and install it""" - spec = str(dist.as_requirement()) - is_script = is_python_script(script_text, script_name) - - if is_script: - body = self._load_template(dev_path) % locals() - script_text = ScriptWriter.get_header(script_text) + body - self.write_script(script_name, _to_ascii(script_text), 'b') - - @staticmethod - def _load_template(dev_path): - """ - There are a couple of template scripts in the package. This - function loads one of them and prepares it for use. 
- """ - # See https://github.com/pypa/setuptools/issues/134 for info - # on script file naming and downstream issues with SVR4 - name = 'script.tmpl' - if dev_path: - name = name.replace('.tmpl', ' (dev).tmpl') - - raw_bytes = resource_string('setuptools', name) - return raw_bytes.decode('utf-8') - - def write_script(self, script_name, contents, mode="t", blockers=()): - """Write an executable file to the scripts directory""" - self.delete_blockers( # clean up old .py/.pyw w/o a script - [os.path.join(self.script_dir, x) for x in blockers] - ) - log.info("Installing %s script to %s", script_name, self.script_dir) - target = os.path.join(self.script_dir, script_name) - self.add_output(target) - - if self.dry_run: - return - - mask = current_umask() - ensure_directory(target) - if os.path.exists(target): - os.unlink(target) - with open(target, "w" + mode) as f: - f.write(contents) - chmod(target, 0o777 - mask) - - def install_eggs(self, spec, dist_filename, tmpdir): - # .egg dirs or files are already built, so just return them - if dist_filename.lower().endswith('.egg'): - return [self.install_egg(dist_filename, tmpdir)] - elif dist_filename.lower().endswith('.exe'): - return [self.install_exe(dist_filename, tmpdir)] - elif dist_filename.lower().endswith('.whl'): - return [self.install_wheel(dist_filename, tmpdir)] - - # Anything else, try to extract and build - setup_base = tmpdir - if os.path.isfile(dist_filename) and not dist_filename.endswith('.py'): - unpack_archive(dist_filename, tmpdir, self.unpack_progress) - elif os.path.isdir(dist_filename): - setup_base = os.path.abspath(dist_filename) - - if (setup_base.startswith(tmpdir) # something we downloaded - and self.build_directory and spec is not None): - setup_base = self.maybe_move(spec, dist_filename, setup_base) - - # Find the setup.py file - setup_script = os.path.join(setup_base, 'setup.py') - - if not os.path.exists(setup_script): - setups = glob(os.path.join(setup_base, '*', 'setup.py')) - if not setups: - raise DistutilsError( - "Couldn't find a setup script in %s" % - os.path.abspath(dist_filename) - ) - if len(setups) > 1: - raise DistutilsError( - "Multiple setup scripts in %s" % - os.path.abspath(dist_filename) - ) - setup_script = setups[0] - - # Now run it, and return the result - if self.editable: - log.info(self.report_editable(spec, setup_script)) - return [] - else: - return self.build_and_install(setup_script, setup_base) - - def egg_distribution(self, egg_path): - if os.path.isdir(egg_path): - metadata = PathMetadata(egg_path, os.path.join(egg_path, - 'EGG-INFO')) - else: - metadata = EggMetadata(zipimport.zipimporter(egg_path)) - return Distribution.from_filename(egg_path, metadata=metadata) - - def install_egg(self, egg_path, tmpdir): - destination = os.path.join( - self.install_dir, - os.path.basename(egg_path), - ) - destination = os.path.abspath(destination) - if not self.dry_run: - ensure_directory(destination) - - dist = self.egg_distribution(egg_path) - if not samefile(egg_path, destination): - if os.path.isdir(destination) and not os.path.islink(destination): - dir_util.remove_tree(destination, dry_run=self.dry_run) - elif os.path.exists(destination): - self.execute( - os.unlink, - (destination,), - "Removing " + destination, - ) - try: - new_dist_is_zipped = False - if os.path.isdir(egg_path): - if egg_path.startswith(tmpdir): - f, m = shutil.move, "Moving" - else: - f, m = shutil.copytree, "Copying" - elif self.should_unzip(dist): - self.mkpath(destination) - f, m = self.unpack_and_compile, "Extracting" - 
else: - new_dist_is_zipped = True - if egg_path.startswith(tmpdir): - f, m = shutil.move, "Moving" - else: - f, m = shutil.copy2, "Copying" - self.execute( - f, - (egg_path, destination), - (m + " %s to %s") % ( - os.path.basename(egg_path), - os.path.dirname(destination) - ), - ) - update_dist_caches( - destination, - fix_zipimporter_caches=new_dist_is_zipped, - ) - except Exception: - update_dist_caches(destination, fix_zipimporter_caches=False) - raise - - self.add_output(destination) - return self.egg_distribution(destination) - - def install_exe(self, dist_filename, tmpdir): - # See if it's valid, get data - cfg = extract_wininst_cfg(dist_filename) - if cfg is None: - raise DistutilsError( - "%s is not a valid distutils Windows .exe" % dist_filename - ) - # Create a dummy distribution object until we build the real distro - dist = Distribution( - None, - project_name=cfg.get('metadata', 'name'), - version=cfg.get('metadata', 'version'), platform=get_platform(), - ) - - # Convert the .exe to an unpacked egg - egg_path = os.path.join(tmpdir, dist.egg_name() + '.egg') - dist.location = egg_path - egg_tmp = egg_path + '.tmp' - _egg_info = os.path.join(egg_tmp, 'EGG-INFO') - pkg_inf = os.path.join(_egg_info, 'PKG-INFO') - ensure_directory(pkg_inf) # make sure EGG-INFO dir exists - dist._provider = PathMetadata(egg_tmp, _egg_info) # XXX - self.exe_to_egg(dist_filename, egg_tmp) - - # Write EGG-INFO/PKG-INFO - if not os.path.exists(pkg_inf): - f = open(pkg_inf, 'w') - f.write('Metadata-Version: 1.0\n') - for k, v in cfg.items('metadata'): - if k != 'target_version': - f.write('%s: %s\n' % (k.replace('_', '-').title(), v)) - f.close() - script_dir = os.path.join(_egg_info, 'scripts') - # delete entry-point scripts to avoid duping - self.delete_blockers([ - os.path.join(script_dir, args[0]) - for args in ScriptWriter.get_args(dist) - ]) - # Build .egg file from tmpdir - bdist_egg.make_zipfile( - egg_path, egg_tmp, verbose=self.verbose, dry_run=self.dry_run, - ) - # install the .egg - return self.install_egg(egg_path, tmpdir) - - def exe_to_egg(self, dist_filename, egg_tmp): - """Extract a bdist_wininst to the directories an egg would use""" - # Check for .pth file and set up prefix translations - prefixes = get_exe_prefixes(dist_filename) - to_compile = [] - native_libs = [] - top_level = {} - - def process(src, dst): - s = src.lower() - for old, new in prefixes: - if s.startswith(old): - src = new + src[len(old):] - parts = src.split('/') - dst = os.path.join(egg_tmp, *parts) - dl = dst.lower() - if dl.endswith('.pyd') or dl.endswith('.dll'): - parts[-1] = bdist_egg.strip_module(parts[-1]) - top_level[os.path.splitext(parts[0])[0]] = 1 - native_libs.append(src) - elif dl.endswith('.py') and old != 'SCRIPTS/': - top_level[os.path.splitext(parts[0])[0]] = 1 - to_compile.append(dst) - return dst - if not src.endswith('.pth'): - log.warn("WARNING: can't process %s", src) - return None - - # extract, tracking .pyd/.dll->native_libs and .py -> to_compile - unpack_archive(dist_filename, egg_tmp, process) - stubs = [] - for res in native_libs: - if res.lower().endswith('.pyd'): # create stubs for .pyd's - parts = res.split('/') - resource = parts[-1] - parts[-1] = bdist_egg.strip_module(parts[-1]) + '.py' - pyfile = os.path.join(egg_tmp, *parts) - to_compile.append(pyfile) - stubs.append(pyfile) - bdist_egg.write_stub(resource, pyfile) - self.byte_compile(to_compile) # compile .py's - bdist_egg.write_safety_flag( - os.path.join(egg_tmp, 'EGG-INFO'), - bdist_egg.analyze_egg(egg_tmp, stubs)) # write 
zip-safety flag - - for name in 'top_level', 'native_libs': - if locals()[name]: - txt = os.path.join(egg_tmp, 'EGG-INFO', name + '.txt') - if not os.path.exists(txt): - f = open(txt, 'w') - f.write('\n'.join(locals()[name]) + '\n') - f.close() - - def install_wheel(self, wheel_path, tmpdir): - wheel = Wheel(wheel_path) - assert wheel.is_compatible() - destination = os.path.join(self.install_dir, wheel.egg_name()) - destination = os.path.abspath(destination) - if not self.dry_run: - ensure_directory(destination) - if os.path.isdir(destination) and not os.path.islink(destination): - dir_util.remove_tree(destination, dry_run=self.dry_run) - elif os.path.exists(destination): - self.execute( - os.unlink, - (destination,), - "Removing " + destination, - ) - try: - self.execute( - wheel.install_as_egg, - (destination,), - ("Installing %s to %s") % ( - os.path.basename(wheel_path), - os.path.dirname(destination) - ), - ) - finally: - update_dist_caches(destination, fix_zipimporter_caches=False) - self.add_output(destination) - return self.egg_distribution(destination) - - __mv_warning = textwrap.dedent(""" - Because this distribution was installed --multi-version, before you can - import modules from this package in an application, you will need to - 'import pkg_resources' and then use a 'require()' call similar to one of - these examples, in order to select the desired version: - - pkg_resources.require("%(name)s") # latest installed version - pkg_resources.require("%(name)s==%(version)s") # this exact version - pkg_resources.require("%(name)s>=%(version)s") # this version or higher - """).lstrip() - - __id_warning = textwrap.dedent(""" - Note also that the installation directory must be on sys.path at runtime for - this to work. (e.g. by being the application's script directory, by being on - PYTHONPATH, or by being added to sys.path by your code.) - """) - - def installation_report(self, req, dist, what="Installed"): - """Helpful installation message for display to package users""" - msg = "\n%(what)s %(eggloc)s%(extras)s" - if self.multi_version and not self.no_report: - msg += '\n' + self.__mv_warning - if self.install_dir not in map(normalize_path, sys.path): - msg += '\n' + self.__id_warning - - eggloc = dist.location - name = dist.project_name - version = dist.version - extras = '' # TODO: self.report_extras(req, dist) - return msg % locals() - - __editable_msg = textwrap.dedent(""" - Extracted editable version of %(spec)s to %(dirname)s - - If it uses setuptools in its setup script, you can activate it in - "development" mode by going to that directory and running:: - - %(python)s setup.py develop - - See the setuptools documentation for the "develop" command for more info. 
- """).lstrip() - - def report_editable(self, spec, setup_script): - dirname = os.path.dirname(setup_script) - python = sys.executable - return '\n' + self.__editable_msg % locals() - - def run_setup(self, setup_script, setup_base, args): - sys.modules.setdefault('distutils.command.bdist_egg', bdist_egg) - sys.modules.setdefault('distutils.command.egg_info', egg_info) - - args = list(args) - if self.verbose > 2: - v = 'v' * (self.verbose - 1) - args.insert(0, '-' + v) - elif self.verbose < 2: - args.insert(0, '-q') - if self.dry_run: - args.insert(0, '-n') - log.info( - "Running %s %s", setup_script[len(setup_base) + 1:], ' '.join(args) - ) - try: - run_setup(setup_script, args) - except SystemExit as v: - raise DistutilsError("Setup script exited with %s" % (v.args[0],)) - - def build_and_install(self, setup_script, setup_base): - args = ['bdist_egg', '--dist-dir'] - - dist_dir = tempfile.mkdtemp( - prefix='egg-dist-tmp-', dir=os.path.dirname(setup_script) - ) - try: - self._set_fetcher_options(os.path.dirname(setup_script)) - args.append(dist_dir) - - self.run_setup(setup_script, setup_base, args) - all_eggs = Environment([dist_dir]) - eggs = [] - for key in all_eggs: - for dist in all_eggs[key]: - eggs.append(self.install_egg(dist.location, setup_base)) - if not eggs and not self.dry_run: - log.warn("No eggs found in %s (setup script problem?)", - dist_dir) - return eggs - finally: - rmtree(dist_dir) - log.set_verbosity(self.verbose) # restore our log verbosity - - def _set_fetcher_options(self, base): - """ - When easy_install is about to run bdist_egg on a source dist, that - source dist might have 'setup_requires' directives, requiring - additional fetching. Ensure the fetcher options given to easy_install - are available to that command as well. - """ - # find the fetch options from easy_install and write them out - # to the setup.cfg file. - ei_opts = self.distribution.get_option_dict('easy_install').copy() - fetch_directives = ( - 'find_links', 'site_dirs', 'index_url', 'optimize', - 'site_dirs', 'allow_hosts', - ) - fetch_options = {} - for key, val in ei_opts.items(): - if key not in fetch_directives: - continue - fetch_options[key.replace('_', '-')] = val[1] - # create a settings dictionary suitable for `edit_config` - settings = dict(easy_install=fetch_options) - cfg_filename = os.path.join(base, 'setup.cfg') - setopt.edit_config(cfg_filename, settings) - - def update_pth(self, dist): - if self.pth_file is None: - return - - for d in self.pth_file[dist.key]: # drop old entries - if self.multi_version or d.location != dist.location: - log.info("Removing %s from easy-install.pth file", d) - self.pth_file.remove(d) - if d.location in self.shadow_path: - self.shadow_path.remove(d.location) - - if not self.multi_version: - if dist.location in self.pth_file.paths: - log.info( - "%s is already the active version in easy-install.pth", - dist, - ) - else: - log.info("Adding %s to easy-install.pth file", dist) - self.pth_file.add(dist) # add new entry - if dist.location not in self.shadow_path: - self.shadow_path.append(dist.location) - - if not self.dry_run: - - self.pth_file.save() - - if dist.key == 'setuptools': - # Ensure that setuptools itself never becomes unavailable! - # XXX should this check for latest version? 
- filename = os.path.join(self.install_dir, 'setuptools.pth') - if os.path.islink(filename): - os.unlink(filename) - f = open(filename, 'wt') - f.write(self.pth_file.make_relative(dist.location) + '\n') - f.close() - - def unpack_progress(self, src, dst): - # Progress filter for unpacking - log.debug("Unpacking %s to %s", src, dst) - return dst # only unpack-and-compile skips files for dry run - - def unpack_and_compile(self, egg_path, destination): - to_compile = [] - to_chmod = [] - - def pf(src, dst): - if dst.endswith('.py') and not src.startswith('EGG-INFO/'): - to_compile.append(dst) - elif dst.endswith('.dll') or dst.endswith('.so'): - to_chmod.append(dst) - self.unpack_progress(src, dst) - return not self.dry_run and dst or None - - unpack_archive(egg_path, destination, pf) - self.byte_compile(to_compile) - if not self.dry_run: - for f in to_chmod: - mode = ((os.stat(f)[stat.ST_MODE]) | 0o555) & 0o7755 - chmod(f, mode) - - def byte_compile(self, to_compile): - if sys.dont_write_bytecode: - return - - from distutils.util import byte_compile - - try: - # try to make the byte compile messages quieter - log.set_verbosity(self.verbose - 1) - - byte_compile(to_compile, optimize=0, force=1, dry_run=self.dry_run) - if self.optimize: - byte_compile( - to_compile, optimize=self.optimize, force=1, - dry_run=self.dry_run, - ) - finally: - log.set_verbosity(self.verbose) # restore original verbosity - - __no_default_msg = textwrap.dedent(""" - bad install directory or PYTHONPATH - - You are attempting to install a package to a directory that is not - on PYTHONPATH and which Python does not read ".pth" files from. The - installation directory you specified (via --install-dir, --prefix, or - the distutils default setting) was: - - %s - - and your PYTHONPATH environment variable currently contains: - - %r - - Here are some of your options for correcting the problem: - - * You can choose a different installation directory, i.e., one that is - on PYTHONPATH or supports .pth files - - * You can add the installation directory to the PYTHONPATH environment - variable. (It must then also be on PYTHONPATH whenever you run - Python and want to use the package(s) you are installing.) - - * You can set up the installation directory to support ".pth" files by - using one of the approaches described here: - - https://setuptools.readthedocs.io/en/latest/easy_install.html#custom-installation-locations - - - Please make the appropriate changes for your system and try again.""").lstrip() - - def no_default_version_msg(self): - template = self.__no_default_msg - return template % (self.install_dir, os.environ.get('PYTHONPATH', '')) - - def install_site_py(self): - """Make sure there's a site.py in the target dir, if needed""" - - if self.sitepy_installed: - return # already did it, or don't need to - - sitepy = os.path.join(self.install_dir, "site.py") - source = resource_string("setuptools", "site-patch.py") - source = source.decode('utf-8') - current = "" - - if os.path.exists(sitepy): - log.debug("Checking existing site.py in %s", self.install_dir) - with io.open(sitepy) as strm: - current = strm.read() - - if not current.startswith('def __boot():'): - raise DistutilsError( - "%s is not a setuptools-generated site.py; please" - " remove it." 
% sitepy - ) - - if current != source: - log.info("Creating %s", sitepy) - if not self.dry_run: - ensure_directory(sitepy) - with io.open(sitepy, 'w', encoding='utf-8') as strm: - strm.write(source) - self.byte_compile([sitepy]) - - self.sitepy_installed = True - - def create_home_path(self): - """Create directories under ~.""" - if not self.user: - return - home = convert_path(os.path.expanduser("~")) - for name, path in six.iteritems(self.config_vars): - if path.startswith(home) and not os.path.isdir(path): - self.debug_print("os.makedirs('%s', 0o700)" % path) - os.makedirs(path, 0o700) - - if sys.version[:3] in ('2.3', '2.4', '2.5') or 'real_prefix' in sys.__dict__: - sitedir_name = 'site-packages' - else: - sitedir_name = 'dist-packages' - - INSTALL_SCHEMES = dict( - posix=dict( - install_dir='$base/lib/python$py_version_short/site-packages', - script_dir='$base/bin', - ), - unix_local = dict( - install_dir = '$base/local/lib/python$py_version_short/%s' % sitedir_name, - script_dir = '$base/local/bin', - ), - posix_local = dict( - install_dir = '$base/local/lib/python$py_version_short/%s' % sitedir_name, - script_dir = '$base/local/bin', - ), - deb_system = dict( - install_dir = '$base/lib/python3/%s' % sitedir_name, - script_dir = '$base/bin', - ), - ) - - DEFAULT_SCHEME = dict( - install_dir='$base/Lib/site-packages', - script_dir='$base/Scripts', - ) - - def _expand(self, *attrs): - config_vars = self.get_finalized_command('install').config_vars - - if self.prefix or self.install_layout: - if self.install_layout and self.install_layout in ['deb']: - scheme_name = "deb_system" - self.prefix = '/usr' - elif self.prefix or 'real_prefix' in sys.__dict__: - scheme_name = os.name - else: - scheme_name = "posix_local" - # Set default install_dir/scripts from --prefix - config_vars = config_vars.copy() - config_vars['base'] = self.prefix - scheme = self.INSTALL_SCHEMES.get(scheme_name,self.DEFAULT_SCHEME) - for attr, val in scheme.items(): - if getattr(self, attr, None) is None: - setattr(self, attr, val) - - from distutils.util import subst_vars - - for attr in attrs: - val = getattr(self, attr) - if val is not None: - val = subst_vars(val, config_vars) - if os.name == 'posix': - val = os.path.expanduser(val) - setattr(self, attr, val) - - -def _pythonpath(): - items = os.environ.get('PYTHONPATH', '').split(os.pathsep) - return filter(None, items) - - -def get_site_dirs(): - """ - Return a list of 'site' dirs - """ - - sitedirs = [] - - # start with PYTHONPATH - sitedirs.extend(_pythonpath()) - - prefixes = [sys.prefix] - if sys.exec_prefix != sys.prefix: - prefixes.append(sys.exec_prefix) - for prefix in prefixes: - if prefix: - if sys.platform in ('os2emx', 'riscos'): - sitedirs.append(os.path.join(prefix, "Lib", "site-packages")) - elif os.sep == '/': - sitedirs.extend([ - os.path.join( - prefix, - "local/lib", - "python" + sys.version[:3], - "dist-packages", - ), - os.path.join( - prefix, - "lib", - "python" + sys.version[:3], - "dist-packages", - ), - os.path.join(prefix, "lib", "site-python"), - ]) - else: - sitedirs.extend([ - prefix, - os.path.join(prefix, "lib", "site-packages"), - ]) - if sys.platform == 'darwin': - # for framework builds *only* we add the standard Apple - # locations. 
Currently only per-user, but /Library and - # /Network/Library could be added too - if 'Python.framework' in prefix: - home = os.environ.get('HOME') - if home: - home_sp = os.path.join( - home, - 'Library', - 'Python', - sys.version[:3], - 'site-packages', - ) - sitedirs.append(home_sp) - lib_paths = get_path('purelib'), get_path('platlib') - for site_lib in lib_paths: - if site_lib not in sitedirs: - sitedirs.append(site_lib) - - if site.ENABLE_USER_SITE: - sitedirs.append(site.USER_SITE) - - try: - sitedirs.extend(site.getsitepackages()) - except AttributeError: - pass - - sitedirs = list(map(normalize_path, sitedirs)) - - return sitedirs - - -def expand_paths(inputs): - """Yield sys.path directories that might contain "old-style" packages""" - - seen = {} - - for dirname in inputs: - dirname = normalize_path(dirname) - if dirname in seen: - continue - - seen[dirname] = 1 - if not os.path.isdir(dirname): - continue - - files = os.listdir(dirname) - yield dirname, files - - for name in files: - if not name.endswith('.pth'): - # We only care about the .pth files - continue - if name in ('easy-install.pth', 'setuptools.pth'): - # Ignore .pth files that we control - continue - - # Read the .pth file - f = open(os.path.join(dirname, name)) - lines = list(yield_lines(f)) - f.close() - - # Yield existing non-dupe, non-import directory lines from it - for line in lines: - if not line.startswith("import"): - line = normalize_path(line.rstrip()) - if line not in seen: - seen[line] = 1 - if not os.path.isdir(line): - continue - yield line, os.listdir(line) - - -def extract_wininst_cfg(dist_filename): - """Extract configuration data from a bdist_wininst .exe - - Returns a configparser.RawConfigParser, or None - """ - f = open(dist_filename, 'rb') - try: - endrec = zipfile._EndRecData(f) - if endrec is None: - return None - - prepended = (endrec[9] - endrec[5]) - endrec[6] - if prepended < 12: # no wininst data here - return None - f.seek(prepended - 12) - - tag, cfglen, bmlen = struct.unpack("<iii", f.read(12)) - if tag not in (0x1234567A, 0x1234567B): - return None # not a valid tag - - f.seek(prepended - (12 + cfglen)) - init = {'version': '', 'target_version': ''} - cfg = configparser.RawConfigParser(init) - try: - part = f.read(cfglen) - # Read up to the first null byte. - config = part.split(b'\0', 1)[0] - # Now the config is in bytes, but for RawConfigParser, it should - # be text, so decode it. 
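-            # After decoding, the INI text is wrapped in a StringIO
-            # because RawConfigParser.readfp() expects a file-like object.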
- config = config.decode(sys.getfilesystemencoding()) - cfg.readfp(six.StringIO(config)) - except configparser.Error: - return None - if not cfg.has_section('metadata') or not cfg.has_section('Setup'): - return None - return cfg - - finally: - f.close() - - -def get_exe_prefixes(exe_filename): - """Get exe->egg path translations for a given .exe file""" - - prefixes = [ - ('PURELIB/', ''), - ('PLATLIB/pywin32_system32', ''), - ('PLATLIB/', ''), - ('SCRIPTS/', 'EGG-INFO/scripts/'), - ('DATA/lib/site-packages', ''), - ] - z = zipfile.ZipFile(exe_filename) - try: - for info in z.infolist(): - name = info.filename - parts = name.split('/') - if len(parts) == 3 and parts[2] == 'PKG-INFO': - if parts[1].endswith('.egg-info'): - prefixes.insert(0, ('/'.join(parts[:2]), 'EGG-INFO/')) - break - if len(parts) != 2 or not name.endswith('.pth'): - continue - if name.endswith('-nspkg.pth'): - continue - if parts[0].upper() in ('PURELIB', 'PLATLIB'): - contents = z.read(name) - if six.PY3: - contents = contents.decode() - for pth in yield_lines(contents): - pth = pth.strip().replace('\\', '/') - if not pth.startswith('import'): - prefixes.append((('%s/%s/' % (parts[0], pth)), '')) - finally: - z.close() - prefixes = [(x.lower(), y) for x, y in prefixes] - prefixes.sort() - prefixes.reverse() - return prefixes - - -class PthDistributions(Environment): - """A .pth file with Distribution paths in it""" - - dirty = False - - def __init__(self, filename, sitedirs=()): - self.filename = filename - self.sitedirs = list(map(normalize_path, sitedirs)) - self.basedir = normalize_path(os.path.dirname(self.filename)) - self._load() - Environment.__init__(self, [], None, None) - for path in yield_lines(self.paths): - list(map(self.add, find_distributions(path, True))) - - def _load(self): - self.paths = [] - saw_import = False - seen = dict.fromkeys(self.sitedirs) - if os.path.isfile(self.filename): - f = open(self.filename, 'rt') - for line in f: - if line.startswith('import'): - saw_import = True - continue - path = line.rstrip() - self.paths.append(path) - if not path.strip() or path.strip().startswith('#'): - continue - # skip non-existent paths, in case somebody deleted a package - # manually, and duplicate paths as well - path = self.paths[-1] = normalize_path( - os.path.join(self.basedir, path) - ) - if not os.path.exists(path) or path in seen: - self.paths.pop() # skip it - self.dirty = True # we cleaned up, so we're dirty now :) - continue - seen[path] = 1 - f.close() - - if self.paths and not saw_import: - self.dirty = True # ensure anything we touch has import wrappers - while self.paths and not self.paths[-1].strip(): - self.paths.pop() - - def save(self): - """Write changed .pth file back to disk""" - if not self.dirty: - return - - rel_paths = list(map(self.make_relative, self.paths)) - if rel_paths: - log.debug("Saving %s", self.filename) - lines = self._wrap_lines(rel_paths) - data = '\n'.join(lines) + '\n' - - if os.path.islink(self.filename): - os.unlink(self.filename) - with open(self.filename, 'wt') as f: - f.write(data) - - elif os.path.exists(self.filename): - log.debug("Deleting empty %s", self.filename) - os.unlink(self.filename) - - self.dirty = False - - @staticmethod - def _wrap_lines(lines): - return lines - - def add(self, dist): - """Add `dist` to the distribution map""" - new_path = ( - dist.location not in self.paths and ( - dist.location not in self.sitedirs or - # account for '.' 
being in PYTHONPATH - dist.location == os.getcwd() - ) - ) - if new_path: - self.paths.append(dist.location) - self.dirty = True - Environment.add(self, dist) - - def remove(self, dist): - """Remove `dist` from the distribution map""" - while dist.location in self.paths: - self.paths.remove(dist.location) - self.dirty = True - Environment.remove(self, dist) - - def make_relative(self, path): - npath, last = os.path.split(normalize_path(path)) - baselen = len(self.basedir) - parts = [last] - sep = os.altsep == '/' and '/' or os.sep - while len(npath) >= baselen: - if npath == self.basedir: - parts.append(os.curdir) - parts.reverse() - return sep.join(parts) - npath, last = os.path.split(npath) - parts.append(last) - else: - return path - - -class RewritePthDistributions(PthDistributions): - @classmethod - def _wrap_lines(cls, lines): - yield cls.prelude - for line in lines: - yield line - yield cls.postlude - - prelude = _one_liner(""" - import sys - sys.__plen = len(sys.path) - """) - postlude = _one_liner(""" - import sys - new = sys.path[sys.__plen:] - del sys.path[sys.__plen:] - p = getattr(sys, '__egginsert', 0) - sys.path[p:p] = new - sys.__egginsert = p + len(new) - """) - - -if os.environ.get('SETUPTOOLS_SYS_PATH_TECHNIQUE', 'raw') == 'rewrite': - PthDistributions = RewritePthDistributions - - -def _first_line_re(): - """ - Return a regular expression based on first_line_re suitable for matching - strings. - """ - if isinstance(first_line_re.pattern, str): - return first_line_re - - # first_line_re in Python >=3.1.4 and >=3.2.1 is a bytes pattern. - return re.compile(first_line_re.pattern.decode()) - - -def auto_chmod(func, arg, exc): - if func in [os.unlink, os.remove] and os.name == 'nt': - chmod(arg, stat.S_IWRITE) - return func(arg) - et, ev, _ = sys.exc_info() - six.reraise(et, (ev[0], ev[1] + (" %s %s" % (func, arg)))) - - -def update_dist_caches(dist_path, fix_zipimporter_caches): - """ - Fix any globally cached `dist_path` related data - - `dist_path` should be a path of a newly installed egg distribution (zipped - or unzipped). - - sys.path_importer_cache contains finder objects that have been cached when - importing data from the original distribution. Any such finders need to be - cleared since the replacement distribution might be packaged differently, - e.g. a zipped egg distribution might get replaced with an unzipped egg - folder or vice versa. Having the old finders cached may then cause Python - to attempt loading modules from the replacement distribution using an - incorrect loader. - - zipimport.zipimporter objects are Python loaders charged with importing - data packaged inside zip archives. If stale loaders referencing the - original distribution, are left behind, they can fail to load modules from - the replacement distribution. E.g. if an old zipimport.zipimporter instance - is used to load data from a new zipped egg archive, it may cause the - operation to attempt to locate the requested data in the wrong location - - one indicated by the original distribution's zip archive directory - information. Such an operation may then fail outright, e.g. report having - read a 'bad local file header', or even worse, it may fail silently & - return invalid data. - - zipimport._zip_directory_cache contains cached zip archive directory - information for all existing zipimport.zipimporter instances and all such - instances connected to the same archive share the same cached directory - information. 
- - If asked, and the underlying Python implementation allows it, we can fix - all existing zipimport.zipimporter instances instead of having to track - them down and remove them one by one, by updating their shared cached zip - archive directory information. This, of course, assumes that the - replacement distribution is packaged as a zipped egg. - - If not asked to fix existing zipimport.zipimporter instances, we still do - our best to clear any remaining zipimport.zipimporter related cached data - that might somehow later get used when attempting to load data from the new - distribution and thus cause such load operations to fail. Note that when - tracking down such remaining stale data, we can not catch every conceivable - usage from here, and we clear only those that we know of and have found to - cause problems if left alive. Any remaining caches should be updated by - whomever is in charge of maintaining them, i.e. they should be ready to - handle us replacing their zip archives with new distributions at runtime. - - """ - # There are several other known sources of stale zipimport.zipimporter - # instances that we do not clear here, but might if ever given a reason to - # do so: - # * Global setuptools pkg_resources.working_set (a.k.a. 'master working - # set') may contain distributions which may in turn contain their - # zipimport.zipimporter loaders. - # * Several zipimport.zipimporter loaders held by local variables further - # up the function call stack when running the setuptools installation. - # * Already loaded modules may have their __loader__ attribute set to the - # exact loader instance used when importing them. Python 3.4 docs state - # that this information is intended mostly for introspection and so is - # not expected to cause us problems. - normalized_path = normalize_path(dist_path) - _uncache(normalized_path, sys.path_importer_cache) - if fix_zipimporter_caches: - _replace_zip_directory_cache_data(normalized_path) - else: - # Here, even though we do not want to fix existing and now stale - # zipimporter cache information, we still want to remove it. Related to - # Python's zip archive directory information cache, we clear each of - # its stale entries in two phases: - # 1. Clear the entry so attempting to access zip archive information - # via any existing stale zipimport.zipimporter instances fails. - # 2. Remove the entry from the cache so any newly constructed - # zipimport.zipimporter instances do not end up using old stale - # zip archive directory information. - # This whole stale data removal step does not seem strictly necessary, - # but has been left in because it was done before we started replacing - # the zip archive directory information cache content if possible, and - # there are no relevant unit tests that we can depend on to tell us if - # this is really needed. - _remove_and_clear_zip_directory_cache_data(normalized_path) - - -def _collect_zipimporter_cache_entries(normalized_path, cache): - """ - Return zipimporter cache entry keys related to a given normalized path. - - Alternative path spellings (e.g. those using different character case or - those using alternative path separators) related to the same path are - included. Any sub-path entries are included as well, i.e. those - corresponding to zip archives embedded in other zip archives. 
- - """ - result = [] - prefix_len = len(normalized_path) - for p in cache: - np = normalize_path(p) - if (np.startswith(normalized_path) and - np[prefix_len:prefix_len + 1] in (os.sep, '')): - result.append(p) - return result - - -def _update_zipimporter_cache(normalized_path, cache, updater=None): - """ - Update zipimporter cache data for a given normalized path. - - Any sub-path entries are processed as well, i.e. those corresponding to zip - archives embedded in other zip archives. - - Given updater is a callable taking a cache entry key and the original entry - (after already removing the entry from the cache), and expected to update - the entry and possibly return a new one to be inserted in its place. - Returning None indicates that the entry should not be replaced with a new - one. If no updater is given, the cache entries are simply removed without - any additional processing, the same as if the updater simply returned None. - - """ - for p in _collect_zipimporter_cache_entries(normalized_path, cache): - # N.B. pypy's custom zipimport._zip_directory_cache implementation does - # not support the complete dict interface: - # * Does not support item assignment, thus not allowing this function - # to be used only for removing existing cache entries. - # * Does not support the dict.pop() method, forcing us to use the - # get/del patterns instead. For more detailed information see the - # following links: - # https://github.com/pypa/setuptools/issues/202#issuecomment-202913420 - # http://bit.ly/2h9itJX - old_entry = cache[p] - del cache[p] - new_entry = updater and updater(p, old_entry) - if new_entry is not None: - cache[p] = new_entry - - -def _uncache(normalized_path, cache): - _update_zipimporter_cache(normalized_path, cache) - - -def _remove_and_clear_zip_directory_cache_data(normalized_path): - def clear_and_remove_cached_zip_archive_directory_data(path, old_entry): - old_entry.clear() - - _update_zipimporter_cache( - normalized_path, zipimport._zip_directory_cache, - updater=clear_and_remove_cached_zip_archive_directory_data) - - -# PyPy Python implementation does not allow directly writing to the -# zipimport._zip_directory_cache and so prevents us from attempting to correct -# its content. The best we can do there is clear the problematic cache content -# and have PyPy repopulate it as needed. The downside is that if there are any -# stale zipimport.zipimporter instances laying around, attempting to use them -# will fail due to not having its zip archive directory information available -# instead of being automatically corrected to use the new correct zip archive -# directory information. -if '__pypy__' in sys.builtin_module_names: - _replace_zip_directory_cache_data = \ - _remove_and_clear_zip_directory_cache_data -else: - - def _replace_zip_directory_cache_data(normalized_path): - def replace_cached_zip_archive_directory_data(path, old_entry): - # N.B. In theory, we could load the zip directory information just - # once for all updated path spellings, and then copy it locally and - # update its contained path strings to contain the correct - # spelling, but that seems like a way too invasive move (this cache - # structure is not officially documented anywhere and could in - # theory change with new Python releases) for no significant - # benefit. 
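-            # Clear the stale directory data, let a fresh zipimporter
-            # re-read the archive (repopulating the cache), then copy the
-            # new data into the shared entry so any existing zipimporter
-            # instances pick it up as well.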
- old_entry.clear() - zipimport.zipimporter(path) - old_entry.update(zipimport._zip_directory_cache[path]) - return old_entry - - _update_zipimporter_cache( - normalized_path, zipimport._zip_directory_cache, - updater=replace_cached_zip_archive_directory_data) - - -def is_python(text, filename='<string>'): - "Is this string a valid Python script?" - try: - compile(text, filename, 'exec') - except (SyntaxError, TypeError): - return False - else: - return True - - -def is_sh(executable): - """Determine if the specified executable is a .sh (contains a #! line)""" - try: - with io.open(executable, encoding='latin-1') as fp: - magic = fp.read(2) - except (OSError, IOError): - return executable - return magic == '#!' - - -def nt_quote_arg(arg): - """Quote a command line argument according to Windows parsing rules""" - return subprocess.list2cmdline([arg]) - - -def is_python_script(script_text, filename): - """Is this text, as a whole, a Python script? (as opposed to shell/bat/etc. - """ - if filename.endswith('.py') or filename.endswith('.pyw'): - return True # extension says it's Python - if is_python(script_text, filename): - return True # it's syntactically valid Python - if script_text.startswith('#!'): - # It begins with a '#!' line, so check if 'python' is in it somewhere - return 'python' in script_text.splitlines()[0].lower() - - return False # Not any Python I can recognize - - -try: - from os import chmod as _chmod -except ImportError: - # Jython compatibility - def _chmod(*args): - pass - - -def chmod(path, mode): - log.debug("changing mode of %s to %o", path, mode) - try: - _chmod(path, mode) - except os.error as e: - log.debug("chmod failed: %s", e) - - -class CommandSpec(list): - """ - A command spec for a #! header, specified as a list of arguments akin to - those passed to Popen. - """ - - options = [] - split_args = dict() - - @classmethod - def best(cls): - """ - Choose the best CommandSpec class based on environmental conditions. - """ - return cls - - @classmethod - def _sys_executable(cls): - _default = os.path.normpath(sys.executable) - return os.environ.get('__PYVENV_LAUNCHER__', _default) - - @classmethod - def from_param(cls, param): - """ - Construct a CommandSpec from a parameter to build_scripts, which may - be None. - """ - if isinstance(param, cls): - return param - if isinstance(param, list): - return cls(param) - if param is None: - return cls.from_environment() - # otherwise, assume it's a string. - return cls.from_string(param) - - @classmethod - def from_environment(cls): - return cls([cls._sys_executable()]) - - @classmethod - def from_string(cls, string): - """ - Construct a command spec from a simple string representing a command - line parseable by shlex.split. - """ - items = shlex.split(string, **cls.split_args) - return cls(items) - - def install_options(self, script_text): - self.options = shlex.split(self._extract_options(script_text)) - cmdline = subprocess.list2cmdline(self) - if not isascii(cmdline): - self.options[:0] = ['-x'] - - @staticmethod - def _extract_options(orig_script): - """ - Extract any options from the first line of the script. 
- """ - first = (orig_script + '\n').splitlines()[0] - match = _first_line_re().match(first) - options = match.group(1) or '' if match else '' - return options.strip() - - def as_header(self): - return self._render(self + list(self.options)) - - @staticmethod - def _strip_quotes(item): - _QUOTES = '"\'' - for q in _QUOTES: - if item.startswith(q) and item.endswith(q): - return item[1:-1] - return item - - @staticmethod - def _render(items): - cmdline = subprocess.list2cmdline( - CommandSpec._strip_quotes(item.strip()) for item in items) - return '#!' + cmdline + '\n' - - -# For pbr compat; will be removed in a future version. -sys_executable = CommandSpec._sys_executable() - - -class WindowsCommandSpec(CommandSpec): - split_args = dict(posix=False) - - -class ScriptWriter(object): - """ - Encapsulates behavior around writing entry point scripts for console and - gui apps. - """ - - template = textwrap.dedent(r""" - # EASY-INSTALL-ENTRY-SCRIPT: %(spec)r,%(group)r,%(name)r - __requires__ = %(spec)r - import re - import sys - from pkg_resources import load_entry_point - - if __name__ == '__main__': - sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]) - sys.exit( - load_entry_point(%(spec)r, %(group)r, %(name)r)() - ) - """).lstrip() - - command_spec_class = CommandSpec - - @classmethod - def get_script_args(cls, dist, executable=None, wininst=False): - # for backward compatibility - warnings.warn("Use get_args", DeprecationWarning) - writer = (WindowsScriptWriter if wininst else ScriptWriter).best() - header = cls.get_script_header("", executable, wininst) - return writer.get_args(dist, header) - - @classmethod - def get_script_header(cls, script_text, executable=None, wininst=False): - # for backward compatibility - warnings.warn("Use get_header", DeprecationWarning) - if wininst: - executable = "python.exe" - cmd = cls.command_spec_class.best().from_param(executable) - cmd.install_options(script_text) - return cmd.as_header() - - @classmethod - def get_args(cls, dist, header=None): - """ - Yield write_script() argument tuples for a distribution's - console_scripts and gui_scripts entry points. - """ - if header is None: - header = cls.get_header() - spec = str(dist.as_requirement()) - for type_ in 'console', 'gui': - group = type_ + '_scripts' - for name, ep in dist.get_entry_map(group).items(): - cls._ensure_safe_name(name) - script_text = cls.template % locals() - args = cls._get_script_args(type_, name, header, script_text) - for res in args: - yield res - - @staticmethod - def _ensure_safe_name(name): - """ - Prevent paths in *_scripts entry point names. - """ - has_path_sep = re.search(r'[\\/]', name) - if has_path_sep: - raise ValueError("Path separators not allowed in script names") - - @classmethod - def get_writer(cls, force_windows): - # for backward compatibility - warnings.warn("Use best", DeprecationWarning) - return WindowsScriptWriter.best() if force_windows else cls.best() - - @classmethod - def best(cls): - """ - Select the best ScriptWriter for this environment. - """ - if sys.platform == 'win32' or (os.name == 'java' and os._name == 'nt'): - return WindowsScriptWriter.best() - else: - return cls - - @classmethod - def _get_script_args(cls, type_, name, header, script_text): - # Simply write the stub with no extension. - yield (name, header + script_text) - - @classmethod - def get_header(cls, script_text="", executable=None): - """Create a #! 
line, getting options (if any) from script_text""" - cmd = cls.command_spec_class.best().from_param(executable) - cmd.install_options(script_text) - return cmd.as_header() - - -class WindowsScriptWriter(ScriptWriter): - command_spec_class = WindowsCommandSpec - - @classmethod - def get_writer(cls): - # for backward compatibility - warnings.warn("Use best", DeprecationWarning) - return cls.best() - - @classmethod - def best(cls): - """ - Select the best ScriptWriter suitable for Windows - """ - writer_lookup = dict( - executable=WindowsExecutableLauncherWriter, - natural=cls, - ) - # for compatibility, use the executable launcher by default - launcher = os.environ.get('SETUPTOOLS_LAUNCHER', 'executable') - return writer_lookup[launcher] - - @classmethod - def _get_script_args(cls, type_, name, header, script_text): - "For Windows, add a .py extension" - ext = dict(console='.pya', gui='.pyw')[type_] - if ext not in os.environ['PATHEXT'].lower().split(';'): - msg = ( - "{ext} not listed in PATHEXT; scripts will not be " - "recognized as executables." - ).format(**locals()) - warnings.warn(msg, UserWarning) - old = ['.pya', '.py', '-script.py', '.pyc', '.pyo', '.pyw', '.exe'] - old.remove(ext) - header = cls._adjust_header(type_, header) - blockers = [name + x for x in old] - yield name + ext, header + script_text, 't', blockers - - @classmethod - def _adjust_header(cls, type_, orig_header): - """ - Make sure 'pythonw' is used for gui and and 'python' is used for - console (regardless of what sys.executable is). - """ - pattern = 'pythonw.exe' - repl = 'python.exe' - if type_ == 'gui': - pattern, repl = repl, pattern - pattern_ob = re.compile(re.escape(pattern), re.IGNORECASE) - new_header = pattern_ob.sub(string=orig_header, repl=repl) - return new_header if cls._use_header(new_header) else orig_header - - @staticmethod - def _use_header(new_header): - """ - Should _adjust_header use the replaced header? - - On non-windows systems, always use. On - Windows systems, only use the replaced header if it resolves - to an executable on the system. - """ - clean_header = new_header[2:-1].strip('"') - return sys.platform != 'win32' or find_executable(clean_header) - - -class WindowsExecutableLauncherWriter(WindowsScriptWriter): - @classmethod - def _get_script_args(cls, type_, name, header, script_text): - """ - For Windows, add a .py extension and an .exe launcher - """ - if type_ == 'gui': - launcher_type = 'gui' - ext = '-script.pyw' - old = ['.pyw'] - else: - launcher_type = 'cli' - ext = '-script.py' - old = ['.py', '.pyc', '.pyo'] - hdr = cls._adjust_header(type_, header) - blockers = [name + x for x in old] - yield (name + ext, hdr + script_text, 't', blockers) - yield ( - name + '.exe', get_win_launcher(launcher_type), - 'b' # write in binary mode - ) - if not is_64bit(): - # install a manifest for the launcher to prevent Windows - # from detecting it as an installer (which it will for - # launchers like easy_install.exe). Consider only - # adding a manifest for launchers detected as installers. - # See Distribute #143 for details. - m_name = name + '.exe.manifest' - yield (m_name, load_launcher_manifest(name), 't') - - -# for backward-compatibility -get_script_args = ScriptWriter.get_script_args -get_script_header = ScriptWriter.get_script_header - - -def get_win_launcher(type): - """ - Load the Windows launcher (executable) suitable for launching a script. - - `type` should be either 'cli' or 'gui' - - Returns the executable as a byte string. 
- """ - launcher_fn = '%s.exe' % type - if is_64bit(): - launcher_fn = launcher_fn.replace(".", "-64.") - else: - launcher_fn = launcher_fn.replace(".", "-32.") - return resource_string('setuptools', launcher_fn) - - -def load_launcher_manifest(name): - manifest = pkg_resources.resource_string(__name__, 'launcher manifest.xml') - if six.PY2: - return manifest % vars() - else: - return manifest.decode('utf-8') % vars() - - -def rmtree(path, ignore_errors=False, onerror=auto_chmod): - return shutil.rmtree(path, ignore_errors, onerror) - - -def current_umask(): - tmp = os.umask(0o022) - os.umask(tmp) - return tmp - - -def bootstrap(): - # This function is called when setuptools*.egg is run using /bin/sh - import setuptools - - argv0 = os.path.dirname(setuptools.__path__[0]) - sys.argv[0] = argv0 - sys.argv.append(argv0) - main() - - -def main(argv=None, **kw): - from setuptools import setup - from setuptools.dist import Distribution - - class DistributionWithoutHelpCommands(Distribution): - common_usage = "" - - def _show_help(self, *args, **kw): - with _patch_usage(): - Distribution._show_help(self, *args, **kw) - - if argv is None: - argv = sys.argv[1:] - - with _patch_usage(): - setup( - script_args=['-q', 'easy_install', '-v'] + argv, - script_name=sys.argv[0] or 'easy_install', - distclass=DistributionWithoutHelpCommands, - **kw - ) - - -@contextlib.contextmanager -def _patch_usage(): - import distutils.core - USAGE = textwrap.dedent(""" - usage: %(script)s [options] requirement_or_url ... - or: %(script)s --help - """).lstrip() - - def gen_usage(script_name): - return USAGE % dict( - script=os.path.basename(script_name), - ) - - saved = distutils.core.gen_usage - distutils.core.gen_usage = gen_usage - try: - yield - finally: - distutils.core.gen_usage = saved diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/egg_info.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/egg_info.py deleted file mode 100644 index 8b0f448..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/egg_info.py +++ /dev/null @@ -1,696 +0,0 @@ -"""setuptools.command.egg_info - -Create a distribution's .egg-info directory and contents""" - -from distutils.filelist import FileList as _FileList -from distutils.errors import DistutilsInternalError -from distutils.util import convert_path -from distutils import log -import distutils.errors -import distutils.filelist -import os -import re -import sys -import io -import warnings -import time -import collections - -from setuptools.extern import six -from setuptools.extern.six.moves import map - -from setuptools import Command -from setuptools.command.sdist import sdist -from setuptools.command.sdist import walk_revctrl -from setuptools.command.setopt import edit_config -from setuptools.command import bdist_egg -from pkg_resources import ( - parse_requirements, safe_name, parse_version, - safe_version, yield_lines, EntryPoint, iter_entry_points, to_filename) -import setuptools.unicode_utils as unicode_utils -from setuptools.glob import glob - -from setuptools.extern import packaging - - -def translate_pattern(glob): - """ - Translate a file path glob like '*.txt' in to a regular expression. - This differs from fnmatch.translate which allows wildcards to match - directory separators. It also knows about '**/' which matches any number of - directories. - """ - pat = '' - - # This will split on '/' within [character classes]. This is deliberate. 
- chunks = glob.split(os.path.sep) - - sep = re.escape(os.sep) - valid_char = '[^%s]' % (sep,) - - for c, chunk in enumerate(chunks): - last_chunk = c == len(chunks) - 1 - - # Chunks that are a literal ** are globstars. They match anything. - if chunk == '**': - if last_chunk: - # Match anything if this is the last component - pat += '.*' - else: - # Match '(name/)*' - pat += '(?:%s+%s)*' % (valid_char, sep) - continue # Break here as the whole path component has been handled - - # Find any special characters in the remainder - i = 0 - chunk_len = len(chunk) - while i < chunk_len: - char = chunk[i] - if char == '*': - # Match any number of name characters - pat += valid_char + '*' - elif char == '?': - # Match a name character - pat += valid_char - elif char == '[': - # Character class - inner_i = i + 1 - # Skip initial !/] chars - if inner_i < chunk_len and chunk[inner_i] == '!': - inner_i = inner_i + 1 - if inner_i < chunk_len and chunk[inner_i] == ']': - inner_i = inner_i + 1 - - # Loop till the closing ] is found - while inner_i < chunk_len and chunk[inner_i] != ']': - inner_i = inner_i + 1 - - if inner_i >= chunk_len: - # Got to the end of the string without finding a closing ] - # Do not treat this as a matching group, but as a literal [ - pat += re.escape(char) - else: - # Grab the insides of the [brackets] - inner = chunk[i + 1:inner_i] - char_class = '' - - # Class negation - if inner[0] == '!': - char_class = '^' - inner = inner[1:] - - char_class += re.escape(inner) - pat += '[%s]' % (char_class,) - - # Skip to the end ] - i = inner_i - else: - pat += re.escape(char) - i += 1 - - # Join each chunk with the dir separator - if not last_chunk: - pat += sep - - pat += r'\Z' - return re.compile(pat, flags=re.MULTILINE|re.DOTALL) - - -class egg_info(Command): - description = "create a distribution's .egg-info directory" - - user_options = [ - ('egg-base=', 'e', "directory containing .egg-info directories" - " (default: top of the source tree)"), - ('tag-date', 'd', "Add date stamp (e.g. 20050528) to version number"), - ('tag-build=', 'b', "Specify explicit tag to add to version number"), - ('no-date', 'D', "Don't include date stamp [default]"), - ] - - boolean_options = ['tag-date'] - negative_opt = { - 'no-date': 'tag-date', - } - - def initialize_options(self): - self.egg_name = None - self.egg_version = None - self.egg_base = None - self.egg_info = None - self.tag_build = None - self.tag_date = 0 - self.broken_egg_info = False - self.vtags = None - - #################################### - # allow the 'tag_svn_revision' to be detected and - # set, supporting sdists built on older Setuptools. - @property - def tag_svn_revision(self): - pass - - @tag_svn_revision.setter - def tag_svn_revision(self, value): - pass - #################################### - - def save_version_info(self, filename): - """ - Materialize the value of date into the - build tag. Install build keys in a deterministic order - to avoid arbitrary reordering on subsequent builds. 
- """ - egg_info = collections.OrderedDict() - # follow the order these keys would have been added - # when PYTHONHASHSEED=0 - egg_info['tag_build'] = self.tags() - egg_info['tag_date'] = 0 - edit_config(filename, dict(egg_info=egg_info)) - - def finalize_options(self): - self.egg_name = safe_name(self.distribution.get_name()) - self.vtags = self.tags() - self.egg_version = self.tagged_version() - - parsed_version = parse_version(self.egg_version) - - try: - is_version = isinstance(parsed_version, packaging.version.Version) - spec = ( - "%s==%s" if is_version else "%s===%s" - ) - list( - parse_requirements(spec % (self.egg_name, self.egg_version)) - ) - except ValueError: - raise distutils.errors.DistutilsOptionError( - "Invalid distribution name or version syntax: %s-%s" % - (self.egg_name, self.egg_version) - ) - - if self.egg_base is None: - dirs = self.distribution.package_dir - self.egg_base = (dirs or {}).get('', os.curdir) - - self.ensure_dirname('egg_base') - self.egg_info = to_filename(self.egg_name) + '.egg-info' - if self.egg_base != os.curdir: - self.egg_info = os.path.join(self.egg_base, self.egg_info) - if '-' in self.egg_name: - self.check_broken_egg_info() - - # Set package version for the benefit of dumber commands - # (e.g. sdist, bdist_wininst, etc.) - # - self.distribution.metadata.version = self.egg_version - - # If we bootstrapped around the lack of a PKG-INFO, as might be the - # case in a fresh checkout, make sure that any special tags get added - # to the version info - # - pd = self.distribution._patched_dist - if pd is not None and pd.key == self.egg_name.lower(): - pd._version = self.egg_version - pd._parsed_version = parse_version(self.egg_version) - self.distribution._patched_dist = None - - def write_or_delete_file(self, what, filename, data, force=False): - """Write `data` to `filename` or delete if empty - - If `data` is non-empty, this routine is the same as ``write_file()``. - If `data` is empty but not ``None``, this is the same as calling - ``delete_file(filename)`. If `data` is ``None``, then this is a no-op - unless `filename` exists, in which case a warning is issued about the - orphaned file (if `force` is false), or deleted (if `force` is true). - """ - if data: - self.write_file(what, filename, data) - elif os.path.exists(filename): - if data is None and not force: - log.warn( - "%s not set in setup(), but %s exists", what, filename - ) - return - else: - self.delete_file(filename) - - def write_file(self, what, filename, data): - """Write `data` to `filename` (if not a dry run) after announcing it - - `what` is used in a log message to identify what is being written - to the file. - """ - log.info("writing %s to %s", what, filename) - if six.PY3: - data = data.encode("utf-8") - if not self.dry_run: - f = open(filename, 'wb') - f.write(data) - f.close() - - def delete_file(self, filename): - """Delete `filename` (if not a dry run) after announcing it""" - log.info("deleting %s", filename) - if not self.dry_run: - os.unlink(filename) - - def tagged_version(self): - version = self.distribution.get_version() - # egg_info may be called more than once for a distribution, - # in which case the version string already contains all tags. 
- if self.vtags and version.endswith(self.vtags): - return safe_version(version) - return safe_version(version + self.vtags) - - def run(self): - self.mkpath(self.egg_info) - installer = self.distribution.fetch_build_egg - for ep in iter_entry_points('egg_info.writers'): - ep.require(installer=installer) - writer = ep.resolve() - writer(self, ep.name, os.path.join(self.egg_info, ep.name)) - - # Get rid of native_libs.txt if it was put there by older bdist_egg - nl = os.path.join(self.egg_info, "native_libs.txt") - if os.path.exists(nl): - self.delete_file(nl) - - self.find_sources() - - def tags(self): - version = '' - if self.tag_build: - version += self.tag_build - if self.tag_date: - version += time.strftime("-%Y%m%d") - return version - - def find_sources(self): - """Generate SOURCES.txt manifest file""" - manifest_filename = os.path.join(self.egg_info, "SOURCES.txt") - mm = manifest_maker(self.distribution) - mm.manifest = manifest_filename - mm.run() - self.filelist = mm.filelist - - def check_broken_egg_info(self): - bei = self.egg_name + '.egg-info' - if self.egg_base != os.curdir: - bei = os.path.join(self.egg_base, bei) - if os.path.exists(bei): - log.warn( - "-" * 78 + '\n' - "Note: Your current .egg-info directory has a '-' in its name;" - '\nthis will not work correctly with "setup.py develop".\n\n' - 'Please rename %s to %s to correct this problem.\n' + '-' * 78, - bei, self.egg_info - ) - self.broken_egg_info = self.egg_info - self.egg_info = bei # make it work for now - - -class FileList(_FileList): - # Implementations of the various MANIFEST.in commands - - def process_template_line(self, line): - # Parse the line: split it up, make sure the right number of words - # is there, and return the relevant words. 'action' is always - # defined: it's the first word of the line. Which of the other - # three are defined depends on the action; it'll be either - # patterns, (dir and patterns), or (dir_pattern). - (action, patterns, dir, dir_pattern) = self._parse_template_line(line) - - # OK, now we know that the action is valid and we have the - # right number of words on the line for that action -- so we - # can proceed with minimal error-checking. 
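-        # Each handler below reports whether anything matched, so an
-        # ineffective MANIFEST.in pattern produces a warning.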
- if action == 'include': - self.debug_print("include " + ' '.join(patterns)) - for pattern in patterns: - if not self.include(pattern): - log.warn("warning: no files found matching '%s'", pattern) - - elif action == 'exclude': - self.debug_print("exclude " + ' '.join(patterns)) - for pattern in patterns: - if not self.exclude(pattern): - log.warn(("warning: no previously-included files " - "found matching '%s'"), pattern) - - elif action == 'global-include': - self.debug_print("global-include " + ' '.join(patterns)) - for pattern in patterns: - if not self.global_include(pattern): - log.warn(("warning: no files found matching '%s' " - "anywhere in distribution"), pattern) - - elif action == 'global-exclude': - self.debug_print("global-exclude " + ' '.join(patterns)) - for pattern in patterns: - if not self.global_exclude(pattern): - log.warn(("warning: no previously-included files matching " - "'%s' found anywhere in distribution"), - pattern) - - elif action == 'recursive-include': - self.debug_print("recursive-include %s %s" % - (dir, ' '.join(patterns))) - for pattern in patterns: - if not self.recursive_include(dir, pattern): - log.warn(("warning: no files found matching '%s' " - "under directory '%s'"), - pattern, dir) - - elif action == 'recursive-exclude': - self.debug_print("recursive-exclude %s %s" % - (dir, ' '.join(patterns))) - for pattern in patterns: - if not self.recursive_exclude(dir, pattern): - log.warn(("warning: no previously-included files matching " - "'%s' found under directory '%s'"), - pattern, dir) - - elif action == 'graft': - self.debug_print("graft " + dir_pattern) - if not self.graft(dir_pattern): - log.warn("warning: no directories found matching '%s'", - dir_pattern) - - elif action == 'prune': - self.debug_print("prune " + dir_pattern) - if not self.prune(dir_pattern): - log.warn(("no previously-included directories found " - "matching '%s'"), dir_pattern) - - else: - raise DistutilsInternalError( - "this cannot happen: invalid action '%s'" % action) - - def _remove_files(self, predicate): - """ - Remove all files from the file list that match the predicate. - Return True if any matching files were removed - """ - found = False - for i in range(len(self.files) - 1, -1, -1): - if predicate(self.files[i]): - self.debug_print(" removing " + self.files[i]) - del self.files[i] - found = True - return found - - def include(self, pattern): - """Include files that match 'pattern'.""" - found = [f for f in glob(pattern) if not os.path.isdir(f)] - self.extend(found) - return bool(found) - - def exclude(self, pattern): - """Exclude files that match 'pattern'.""" - match = translate_pattern(pattern) - return self._remove_files(match.match) - - def recursive_include(self, dir, pattern): - """ - Include all files anywhere in 'dir/' that match the pattern. - """ - full_pattern = os.path.join(dir, '**', pattern) - found = [f for f in glob(full_pattern, recursive=True) - if not os.path.isdir(f)] - self.extend(found) - return bool(found) - - def recursive_exclude(self, dir, pattern): - """ - Exclude any file anywhere in 'dir/' that match the pattern. 
- """ - match = translate_pattern(os.path.join(dir, '**', pattern)) - return self._remove_files(match.match) - - def graft(self, dir): - """Include all files from 'dir/'.""" - found = [ - item - for match_dir in glob(dir) - for item in distutils.filelist.findall(match_dir) - ] - self.extend(found) - return bool(found) - - def prune(self, dir): - """Filter out files from 'dir/'.""" - match = translate_pattern(os.path.join(dir, '**')) - return self._remove_files(match.match) - - def global_include(self, pattern): - """ - Include all files anywhere in the current directory that match the - pattern. This is very inefficient on large file trees. - """ - if self.allfiles is None: - self.findall() - match = translate_pattern(os.path.join('**', pattern)) - found = [f for f in self.allfiles if match.match(f)] - self.extend(found) - return bool(found) - - def global_exclude(self, pattern): - """ - Exclude all files anywhere that match the pattern. - """ - match = translate_pattern(os.path.join('**', pattern)) - return self._remove_files(match.match) - - def append(self, item): - if item.endswith('\r'): # Fix older sdists built on Windows - item = item[:-1] - path = convert_path(item) - - if self._safe_path(path): - self.files.append(path) - - def extend(self, paths): - self.files.extend(filter(self._safe_path, paths)) - - def _repair(self): - """ - Replace self.files with only safe paths - - Because some owners of FileList manipulate the underlying - ``files`` attribute directly, this method must be called to - repair those paths. - """ - self.files = list(filter(self._safe_path, self.files)) - - def _safe_path(self, path): - enc_warn = "'%s' not %s encodable -- skipping" - - # To avoid accidental trans-codings errors, first to unicode - u_path = unicode_utils.filesys_decode(path) - if u_path is None: - log.warn("'%s' in unexpected encoding -- skipping" % path) - return False - - # Must ensure utf-8 encodability - utf8_path = unicode_utils.try_encode(u_path, "utf-8") - if utf8_path is None: - log.warn(enc_warn, path, 'utf-8') - return False - - try: - # accept is either way checks out - if os.path.exists(u_path) or os.path.exists(utf8_path): - return True - # this will catch any encode errors decoding u_path - except UnicodeEncodeError: - log.warn(enc_warn, path, sys.getfilesystemencoding()) - - -class manifest_maker(sdist): - template = "MANIFEST.in" - - def initialize_options(self): - self.use_defaults = 1 - self.prune = 1 - self.manifest_only = 1 - self.force_manifest = 1 - - def finalize_options(self): - pass - - def run(self): - self.filelist = FileList() - if not os.path.exists(self.manifest): - self.write_manifest() # it must exist so it'll get in the list - self.add_defaults() - if os.path.exists(self.template): - self.read_template() - self.prune_file_list() - self.filelist.sort() - self.filelist.remove_duplicates() - self.write_manifest() - - def _manifest_normalize(self, path): - path = unicode_utils.filesys_decode(path) - return path.replace(os.sep, '/') - - def write_manifest(self): - """ - Write the file list in 'self.filelist' to the manifest file - named by 'self.manifest'. 
- """ - self.filelist._repair() - - # Now _repairs should encodability, but not unicode - files = [self._manifest_normalize(f) for f in self.filelist.files] - msg = "writing manifest file '%s'" % self.manifest - self.execute(write_file, (self.manifest, files), msg) - - def warn(self, msg): - if not self._should_suppress_warning(msg): - sdist.warn(self, msg) - - @staticmethod - def _should_suppress_warning(msg): - """ - suppress missing-file warnings from sdist - """ - return re.match(r"standard file .*not found", msg) - - def add_defaults(self): - sdist.add_defaults(self) - self.filelist.append(self.template) - self.filelist.append(self.manifest) - rcfiles = list(walk_revctrl()) - if rcfiles: - self.filelist.extend(rcfiles) - elif os.path.exists(self.manifest): - self.read_manifest() - ei_cmd = self.get_finalized_command('egg_info') - self.filelist.graft(ei_cmd.egg_info) - - def prune_file_list(self): - build = self.get_finalized_command('build') - base_dir = self.distribution.get_fullname() - self.filelist.prune(build.build_base) - self.filelist.prune(base_dir) - sep = re.escape(os.sep) - self.filelist.exclude_pattern(r'(^|' + sep + r')(RCS|CVS|\.svn)' + sep, - is_regex=1) - - -def write_file(filename, contents): - """Create a file with the specified name and write 'contents' (a - sequence of strings without line terminators) to it. - """ - contents = "\n".join(contents) - - # assuming the contents has been vetted for utf-8 encoding - contents = contents.encode("utf-8") - - with open(filename, "wb") as f: # always write POSIX-style manifest - f.write(contents) - - -def write_pkg_info(cmd, basename, filename): - log.info("writing %s", filename) - if not cmd.dry_run: - metadata = cmd.distribution.metadata - metadata.version, oldver = cmd.egg_version, metadata.version - metadata.name, oldname = cmd.egg_name, metadata.name - - try: - # write unescaped data to PKG-INFO, so older pkg_resources - # can still parse it - metadata.write_pkg_info(cmd.egg_info) - finally: - metadata.name, metadata.version = oldname, oldver - - safe = getattr(cmd.distribution, 'zip_safe', None) - - bdist_egg.write_safety_flag(cmd.egg_info, safe) - - -def warn_depends_obsolete(cmd, basename, filename): - if os.path.exists(filename): - log.warn( - "WARNING: 'depends.txt' is not used by setuptools 0.6!\n" - "Use the install_requires/extras_require setup() args instead." 
- ) - - -def _write_requirements(stream, reqs): - lines = yield_lines(reqs or ()) - append_cr = lambda line: line + '\n' - lines = map(append_cr, sorted(lines)) - stream.writelines(lines) - - -def write_requirements(cmd, basename, filename): - dist = cmd.distribution - data = six.StringIO() - _write_requirements(data, dist.install_requires) - extras_require = dist.extras_require or {} - for extra in sorted(extras_require): - data.write('\n[{extra}]\n'.format(**vars())) - _write_requirements(data, extras_require[extra]) - cmd.write_or_delete_file("requirements", filename, data.getvalue()) - - -def write_setup_requirements(cmd, basename, filename): - data = io.StringIO() - _write_requirements(data, cmd.distribution.setup_requires) - cmd.write_or_delete_file("setup-requirements", filename, data.getvalue()) - - -def write_toplevel_names(cmd, basename, filename): - pkgs = dict.fromkeys( - [ - k.split('.', 1)[0] - for k in cmd.distribution.iter_distribution_names() - ] - ) - cmd.write_file("top-level names", filename, '\n'.join(sorted(pkgs)) + '\n') - - -def overwrite_arg(cmd, basename, filename): - write_arg(cmd, basename, filename, True) - - -def write_arg(cmd, basename, filename, force=False): - argname = os.path.splitext(basename)[0] - value = getattr(cmd.distribution, argname, None) - if value is not None: - value = '\n'.join(value) + '\n' - cmd.write_or_delete_file(argname, filename, value, force) - - -def write_entries(cmd, basename, filename): - ep = cmd.distribution.entry_points - - if isinstance(ep, six.string_types) or ep is None: - data = ep - elif ep is not None: - data = [] - for section, contents in sorted(ep.items()): - if not isinstance(contents, six.string_types): - contents = EntryPoint.parse_group(section, contents) - contents = '\n'.join(sorted(map(str, contents.values()))) - data.append('[%s]\n%s\n\n' % (section, contents)) - data = ''.join(data) - - cmd.write_or_delete_file('entry points', filename, data, True) - - -def get_pkg_info_revision(): - """ - Get a -r### off of PKG-INFO Version in case this is an sdist of - a subversion revision. - """ - warnings.warn("get_pkg_info_revision is deprecated.", DeprecationWarning) - if os.path.exists('PKG-INFO'): - with io.open('PKG-INFO') as f: - for line in f: - match = re.match(r"Version:.*-r(\d+)\s*$", line) - if match: - return int(match.group(1)) - return 0 diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/install.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/install.py deleted file mode 100644 index 31a5ddb..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/install.py +++ /dev/null @@ -1,125 +0,0 @@ -from distutils.errors import DistutilsArgError -import inspect -import glob -import warnings -import platform -import distutils.command.install as orig - -import setuptools - -# Prior to numpy 1.9, NumPy relies on the '_install' name, so provide it for -# now. 
See https://github.com/pypa/setuptools/issues/199/ -_install = orig.install - - -class install(orig.install): - """Use easy_install to install the package, w/dependencies""" - - user_options = orig.install.user_options + [ - ('old-and-unmanageable', None, "Try not to use this!"), - ('single-version-externally-managed', None, - "used by system package builders to create 'flat' eggs"), - ] - boolean_options = orig.install.boolean_options + [ - 'old-and-unmanageable', 'single-version-externally-managed', - ] - new_commands = [ - ('install_egg_info', lambda self: True), - ('install_scripts', lambda self: True), - ] - _nc = dict(new_commands) - - def initialize_options(self): - orig.install.initialize_options(self) - self.old_and_unmanageable = None - self.single_version_externally_managed = None - - def finalize_options(self): - orig.install.finalize_options(self) - if self.root: - self.single_version_externally_managed = True - elif self.single_version_externally_managed: - if not self.root and not self.record: - raise DistutilsArgError( - "You must specify --record or --root when building system" - " packages" - ) - - def handle_extra_path(self): - if self.root or self.single_version_externally_managed: - # explicit backward-compatibility mode, allow extra_path to work - return orig.install.handle_extra_path(self) - - # Ignore extra_path when installing an egg (or being run by another - # command without --root or --single-version-externally-managed - self.path_file = None - self.extra_dirs = '' - - def run(self): - # Explicit request for old-style install? Just do it - if self.old_and_unmanageable or self.single_version_externally_managed: - return orig.install.run(self) - - if not self._called_from_setup(inspect.currentframe()): - # Run in backward-compatibility mode to support bdist_* commands. - orig.install.run(self) - else: - self.do_egg_install() - - @staticmethod - def _called_from_setup(run_frame): - """ - Attempt to detect whether run() was called from setup() or by another - command. If called by setup(), the parent caller will be the - 'run_command' method in 'distutils.dist', and *its* caller will be - the 'run_commands' method. If called any other way, the - immediate caller *might* be 'run_command', but it won't have been - called by 'run_commands'. Return True in that case or if a call stack - is unavailable. Return False otherwise. - """ - if run_frame is None: - msg = "Call stack not available. bdist_* commands may fail." - warnings.warn(msg) - if platform.python_implementation() == 'IronPython': - msg = "For best results, pass -X:Frames to enable call stack." - warnings.warn(msg) - return True - res = inspect.getouterframes(run_frame)[2] - caller, = res[:1] - info = inspect.getframeinfo(caller) - caller_module = caller.f_globals.get('__name__', '') - return ( - caller_module == 'distutils.dist' - and info.function == 'run_commands' - ) - - def do_egg_install(self): - - easy_install = self.distribution.get_command_class('easy_install') - - cmd = easy_install( - self.distribution, args="x", root=self.root, record=self.record, - ) - cmd.ensure_finalized() # finalize before bdist_egg munges install cmd - cmd.always_copy_from = '.' 
# make sure local-dir eggs get installed - - # pick up setup-dir .egg files only: no .egg-info - cmd.package_index.scan(glob.glob('*.egg')) - - self.run_command('bdist_egg') - args = [self.distribution.get_command_obj('bdist_egg').egg_output] - - if setuptools.bootstrap_install_from: - # Bootstrap self-installation of setuptools - args.insert(0, setuptools.bootstrap_install_from) - - cmd.args = args - cmd.run() - setuptools.bootstrap_install_from = None - - -# XXX Python 3.1 doesn't see _nc if this is inside the class -install.sub_commands = ( - [cmd for cmd in orig.install.sub_commands if cmd[0] not in install._nc] + - install.new_commands -) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/install_egg_info.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/install_egg_info.py deleted file mode 100644 index 5f405bc..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/install_egg_info.py +++ /dev/null @@ -1,82 +0,0 @@ -from distutils import log, dir_util -import os, sys - -from setuptools import Command -from setuptools import namespaces -from setuptools.archive_util import unpack_archive -import pkg_resources - - -class install_egg_info(namespaces.Installer, Command): - """Install an .egg-info directory for the package""" - - description = "Install an .egg-info directory for the package" - - user_options = [ - ('install-dir=', 'd', "directory to install to"), - ] - - def initialize_options(self): - self.install_dir = None - self.install_layout = None - self.prefix_option = None - - def finalize_options(self): - self.set_undefined_options('install_lib', - ('install_dir', 'install_dir')) - self.set_undefined_options('install',('install_layout','install_layout')) - if sys.hexversion > 0x2060000: - self.set_undefined_options('install',('prefix_option','prefix_option')) - ei_cmd = self.get_finalized_command("egg_info") - basename = pkg_resources.Distribution( - None, None, ei_cmd.egg_name, ei_cmd.egg_version - ).egg_name() + '.egg-info' - - if self.install_layout: - if not self.install_layout.lower() in ['deb']: - raise DistutilsOptionError("unknown value for --install-layout") - self.install_layout = self.install_layout.lower() - basename = basename.replace('-py%s' % pkg_resources.PY_MAJOR, '') - elif self.prefix_option or 'real_prefix' in sys.__dict__: - # don't modify for virtualenv - pass - else: - basename = basename.replace('-py%s' % pkg_resources.PY_MAJOR, '') - - self.source = ei_cmd.egg_info - self.target = os.path.join(self.install_dir, basename) - self.outputs = [] - - def run(self): - self.run_command('egg_info') - if os.path.isdir(self.target) and not os.path.islink(self.target): - dir_util.remove_tree(self.target, dry_run=self.dry_run) - elif os.path.exists(self.target): - self.execute(os.unlink, (self.target,), "Removing " + self.target) - if not self.dry_run: - pkg_resources.ensure_directory(self.target) - self.execute( - self.copytree, (), "Copying %s to %s" % (self.source, self.target) - ) - self.install_namespaces() - - def get_outputs(self): - return self.outputs - - def copytree(self): - # Copy the .egg-info tree to site-packages - def skimmer(src, dst): - # filter out source-control directories; note that 'src' is always - # a '/'-separated path, regardless of platform. 'dst' is a - # platform-specific path. 
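An aside on the `_called_from_setup()` helper in the deleted install.py above: it decides between egg-style and classic installation by walking two frames up the call stack and checking that the caller is `run_commands` in `distutils.dist`. A minimal sketch of that pattern, with hypothetical names, assuming CPython-style frame introspection is available:

import inspect

def called_from(module_name, function_name):
    # Frame 0 is this function, frame 1 is whoever called it, and frame 2
    # is that caller's caller -- the same [2] lookup the deleted code uses.
    frames = inspect.getouterframes(inspect.currentframe())
    if len(frames) < 3:
        return True  # no usable stack; fall back to the permissive default
    caller = frames[2].frame
    info = inspect.getframeinfo(caller)
    return (caller.f_globals.get('__name__', '') == module_name
            and info.function == function_name)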
- for skip in '.svn/', 'CVS/': - if src.startswith(skip) or '/' + skip in src: - return None - if self.install_layout and self.install_layout in ['deb'] and src.startswith('SOURCES.txt'): - log.info("Skipping SOURCES.txt") - return None - self.outputs.append(dst) - log.debug("Copying %s to %s", src, dst) - return dst - - unpack_archive(self.source, self.target, skimmer) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/install_lib.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/install_lib.py deleted file mode 100644 index 578e002..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/install_lib.py +++ /dev/null @@ -1,148 +0,0 @@ -import os -import sys -import imp -from itertools import product, starmap -import distutils.command.install_lib as orig - - -class install_lib(orig.install_lib): - """Don't add compiled flags to filenames of non-Python files""" - - def initialize_options(self): - orig.install_lib.initialize_options(self) - self.multiarch = None - self.install_layout = None - - def finalize_options(self): - orig.install_lib.finalize_options(self) - self.set_undefined_options('install',('install_layout','install_layout')) - if self.install_layout == 'deb' and sys.version_info[:2] >= (3, 3): - import sysconfig - self.multiarch = sysconfig.get_config_var('MULTIARCH') - - def run(self): - self.build() - outfiles = self.install() - if outfiles is not None: - # always compile, in case we have any extension stubs to deal with - self.byte_compile(outfiles) - - def get_exclusions(self): - """ - Return a collections.Sized collections.Container of paths to be - excluded for single_version_externally_managed installations. - """ - all_packages = ( - pkg - for ns_pkg in self._get_SVEM_NSPs() - for pkg in self._all_packages(ns_pkg) - ) - - excl_specs = product(all_packages, self._gen_exclusion_paths()) - return set(starmap(self._exclude_pkg_path, excl_specs)) - - def _exclude_pkg_path(self, pkg, exclusion_path): - """ - Given a package name and exclusion path within that package, - compute the full exclusion path. - """ - parts = pkg.split('.') + [exclusion_path] - return os.path.join(self.install_dir, *parts) - - @staticmethod - def _all_packages(pkg_name): - """ - >>> list(install_lib._all_packages('foo.bar.baz')) - ['foo.bar.baz', 'foo.bar', 'foo'] - """ - while pkg_name: - yield pkg_name - pkg_name, sep, child = pkg_name.rpartition('.') - - def _get_SVEM_NSPs(self): - """ - Get namespace packages (list) but only for - single_version_externally_managed installations and empty otherwise. - """ - # TODO: is it necessary to short-circuit here? i.e. what's the cost - # if get_finalized_command is called even when namespace_packages is - # False? - if not self.distribution.namespace_packages: - return [] - - install_cmd = self.get_finalized_command('install') - svem = install_cmd.single_version_externally_managed - - return self.distribution.namespace_packages if svem else [] - - @staticmethod - def _gen_exclusion_paths(): - """ - Generate file paths to be excluded for namespace packages (bytecode - cache files). - """ - # always exclude the package module itself - yield '__init__.py' - - yield '__init__.pyc' - yield '__init__.pyo' - - if not hasattr(imp, 'get_tag'): - return - - base = os.path.join('__pycache__', '__init__.' 
+ imp.get_tag()) - yield base + '.pyc' - yield base + '.pyo' - yield base + '.opt-1.pyc' - yield base + '.opt-2.pyc' - - def copy_tree( - self, infile, outfile, - preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1 - ): - assert preserve_mode and preserve_times and not preserve_symlinks - exclude = self.get_exclusions() - - if not exclude: - import distutils.dir_util - distutils.dir_util._multiarch = self.multiarch - return orig.install_lib.copy_tree(self, infile, outfile) - - # Exclude namespace package __init__.py* files from the output - - from setuptools.archive_util import unpack_directory - from distutils import log - - outfiles = [] - - if self.multiarch: - import sysconfig - ext_suffix = sysconfig.get_config_var ('EXT_SUFFIX') - if ext_suffix.endswith(self.multiarch + ext_suffix[-3:]): - new_suffix = None - else: - new_suffix = "%s-%s%s" % (ext_suffix[:-3], self.multiarch, ext_suffix[-3:]) - - def pf(src, dst): - if dst in exclude: - log.warn("Skipping installation of %s (namespace package)", - dst) - return False - - if self.multiarch and new_suffix and dst.endswith(ext_suffix) and not dst.endswith(new_suffix): - dst = dst.replace(ext_suffix, new_suffix) - log.info("renaming extension to %s", os.path.basename(dst)) - - log.info("copying %s -> %s", src, os.path.dirname(dst)) - outfiles.append(dst) - return dst - - unpack_directory(infile, outfile, pf) - return outfiles - - def get_outputs(self): - outputs = orig.install_lib.get_outputs(self) - exclude = self.get_exclusions() - if exclude: - return [f for f in outputs if f not in exclude] - return outputs diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/install_scripts.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/install_scripts.py deleted file mode 100644 index 1623427..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/install_scripts.py +++ /dev/null @@ -1,65 +0,0 @@ -from distutils import log -import distutils.command.install_scripts as orig -import os -import sys - -from pkg_resources import Distribution, PathMetadata, ensure_directory - - -class install_scripts(orig.install_scripts): - """Do normal script install, plus any egg_info wrapper scripts""" - - def initialize_options(self): - orig.install_scripts.initialize_options(self) - self.no_ep = False - - def run(self): - import setuptools.command.easy_install as ei - - self.run_command("egg_info") - if self.distribution.scripts: - orig.install_scripts.run(self) # run first to set up self.outfiles - else: - self.outfiles = [] - if self.no_ep: - # don't install entry point scripts into .egg file! - return - - ei_cmd = self.get_finalized_command("egg_info") - dist = Distribution( - ei_cmd.egg_base, PathMetadata(ei_cmd.egg_base, ei_cmd.egg_info), - ei_cmd.egg_name, ei_cmd.egg_version, - ) - bs_cmd = self.get_finalized_command('build_scripts') - exec_param = getattr(bs_cmd, 'executable', None) - bw_cmd = self.get_finalized_command("bdist_wininst") - is_wininst = getattr(bw_cmd, '_is_running', False) - writer = ei.ScriptWriter - if is_wininst: - exec_param = "python.exe" - writer = ei.WindowsScriptWriter - if exec_param == sys.executable: - # In case the path to the Python executable contains a space, wrap - # it so it's not split up. 
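The `__pycache__` exclusion names just above are assembled by hand around `imp.get_tag()`, an API that is deprecated on current Pythons. For orientation only, and not as a reconstruction of the deleted code, `importlib.util` can derive the same bytecode-cache path:

import importlib.util

# For '__init__.py' this yields something like
# '__pycache__/__init__.cpython-36.pyc'; the tag varies by interpreter.
print(importlib.util.cache_from_source('__init__.py'))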
- exec_param = [exec_param] - # resolve the writer to the environment - writer = writer.best() - cmd = writer.command_spec_class.best().from_param(exec_param) - for args in writer.get_args(dist, cmd.as_header()): - self.write_script(*args) - - def write_script(self, script_name, contents, mode="t", *ignored): - """Write an executable file to the scripts directory""" - from setuptools.command.easy_install import chmod, current_umask - - log.info("Installing %s script to %s", script_name, self.install_dir) - target = os.path.join(self.install_dir, script_name) - self.outfiles.append(target) - - mask = current_umask() - if not self.dry_run: - ensure_directory(target) - f = open(target, "w" + mode) - f.write(contents) - f.close() - chmod(target, 0o777 - mask) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/launcher manifest.xml b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/launcher manifest.xml deleted file mode 100644 index 5972a96..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/launcher manifest.xml +++ /dev/null @@ -1,15 +0,0 @@ -<?xml version="1.0" encoding="UTF-8" standalone="yes"?> -<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0"> - <assemblyIdentity version="1.0.0.0" - processorArchitecture="X86" - name="%(name)s" - type="win32"/> - <!-- Identify the application security requirements. --> - <trustInfo xmlns="urn:schemas-microsoft-com:asm.v3"> - <security> - <requestedPrivileges> - <requestedExecutionLevel level="asInvoker" uiAccess="false"/> - </requestedPrivileges> - </security> - </trustInfo> -</assembly> diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/py36compat.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/py36compat.py deleted file mode 100644 index 61063e7..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/py36compat.py +++ /dev/null @@ -1,136 +0,0 @@ -import os -from glob import glob -from distutils.util import convert_path -from distutils.command import sdist - -from setuptools.extern.six.moves import filter - - -class sdist_add_defaults: - """ - Mix-in providing forward-compatibility for functionality as found in - distutils on Python 3.7. - - Do not edit the code in this class except to update functionality - as implemented in distutils. Instead, override in the subclass. - """ - - def add_defaults(self): - """Add all the default files to self.filelist: - - README or README.txt - - setup.py - - test/test*.py - - all pure Python modules mentioned in setup script - - all files pointed by package_data (build_py) - - all files defined in data_files. - - all files defined as scripts. - - all C sources listed as part of extensions or C libraries - in the setup script (doesn't catch C headers!) - Warns if (README or README.txt) or setup.py are missing; everything - else is optional. 
- """ - self._add_defaults_standards() - self._add_defaults_optional() - self._add_defaults_python() - self._add_defaults_data_files() - self._add_defaults_ext() - self._add_defaults_c_libs() - self._add_defaults_scripts() - - @staticmethod - def _cs_path_exists(fspath): - """ - Case-sensitive path existence check - - >>> sdist_add_defaults._cs_path_exists(__file__) - True - >>> sdist_add_defaults._cs_path_exists(__file__.upper()) - False - """ - if not os.path.exists(fspath): - return False - # make absolute so we always have a directory - abspath = os.path.abspath(fspath) - directory, filename = os.path.split(abspath) - return filename in os.listdir(directory) - - def _add_defaults_standards(self): - standards = [self.READMES, self.distribution.script_name] - for fn in standards: - if isinstance(fn, tuple): - alts = fn - got_it = False - for fn in alts: - if self._cs_path_exists(fn): - got_it = True - self.filelist.append(fn) - break - - if not got_it: - self.warn("standard file not found: should have one of " + - ', '.join(alts)) - else: - if self._cs_path_exists(fn): - self.filelist.append(fn) - else: - self.warn("standard file '%s' not found" % fn) - - def _add_defaults_optional(self): - optional = ['test/test*.py', 'setup.cfg'] - for pattern in optional: - files = filter(os.path.isfile, glob(pattern)) - self.filelist.extend(files) - - def _add_defaults_python(self): - # build_py is used to get: - # - python modules - # - files defined in package_data - build_py = self.get_finalized_command('build_py') - - # getting python files - if self.distribution.has_pure_modules(): - self.filelist.extend(build_py.get_source_files()) - - # getting package_data files - # (computed in build_py.data_files by build_py.finalize_options) - for pkg, src_dir, build_dir, filenames in build_py.data_files: - for filename in filenames: - self.filelist.append(os.path.join(src_dir, filename)) - - def _add_defaults_data_files(self): - # getting distribution.data_files - if self.distribution.has_data_files(): - for item in self.distribution.data_files: - if isinstance(item, str): - # plain file - item = convert_path(item) - if os.path.isfile(item): - self.filelist.append(item) - else: - # a (dirname, filenames) tuple - dirname, filenames = item - for f in filenames: - f = convert_path(f) - if os.path.isfile(f): - self.filelist.append(f) - - def _add_defaults_ext(self): - if self.distribution.has_ext_modules(): - build_ext = self.get_finalized_command('build_ext') - self.filelist.extend(build_ext.get_source_files()) - - def _add_defaults_c_libs(self): - if self.distribution.has_c_libraries(): - build_clib = self.get_finalized_command('build_clib') - self.filelist.extend(build_clib.get_source_files()) - - def _add_defaults_scripts(self): - if self.distribution.has_scripts(): - build_scripts = self.get_finalized_command('build_scripts') - self.filelist.extend(build_scripts.get_source_files()) - - -if hasattr(sdist.sdist, '_add_defaults_standards'): - # disable the functionality already available upstream - class sdist_add_defaults: - pass diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/register.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/register.py deleted file mode 100644 index 8d6336a..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/register.py +++ /dev/null @@ -1,10 +0,0 @@ -import distutils.command.register as orig - - -class register(orig.register): - __doc__ = orig.register.__doc__ - - def run(self): - # Make sure that we are 
using valid current name/version info - self.run_command('egg_info') - orig.register.run(self) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/rotate.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/rotate.py deleted file mode 100644 index b89353f..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/rotate.py +++ /dev/null @@ -1,66 +0,0 @@ -from distutils.util import convert_path -from distutils import log -from distutils.errors import DistutilsOptionError -import os -import shutil - -from setuptools.extern import six - -from setuptools import Command - - -class rotate(Command): - """Delete older distributions""" - - description = "delete older distributions, keeping N newest files" - user_options = [ - ('match=', 'm', "patterns to match (required)"), - ('dist-dir=', 'd', "directory where the distributions are"), - ('keep=', 'k', "number of matching distributions to keep"), - ] - - boolean_options = [] - - def initialize_options(self): - self.match = None - self.dist_dir = None - self.keep = None - - def finalize_options(self): - if self.match is None: - raise DistutilsOptionError( - "Must specify one or more (comma-separated) match patterns " - "(e.g. '.zip' or '.egg')" - ) - if self.keep is None: - raise DistutilsOptionError("Must specify number of files to keep") - try: - self.keep = int(self.keep) - except ValueError: - raise DistutilsOptionError("--keep must be an integer") - if isinstance(self.match, six.string_types): - self.match = [ - convert_path(p.strip()) for p in self.match.split(',') - ] - self.set_undefined_options('bdist', ('dist_dir', 'dist_dir')) - - def run(self): - self.run_command("egg_info") - from glob import glob - - for pattern in self.match: - pattern = self.distribution.get_name() + '*' + pattern - files = glob(os.path.join(self.dist_dir, pattern)) - files = [(os.path.getmtime(f), f) for f in files] - files.sort() - files.reverse() - - log.info("%d file(s) matching %s", len(files), pattern) - files = files[self.keep:] - for (t, f) in files: - log.info("Deleting %s", f) - if not self.dry_run: - if os.path.isdir(f): - shutil.rmtree(f) - else: - os.unlink(f) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/saveopts.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/saveopts.py deleted file mode 100644 index 611cec5..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/saveopts.py +++ /dev/null @@ -1,22 +0,0 @@ -from setuptools.command.setopt import edit_config, option_base - - -class saveopts(option_base): - """Save command-line options to a file""" - - description = "save supplied options to setup.cfg or other config file" - - def run(self): - dist = self.distribution - settings = {} - - for cmd in dist.command_options: - - if cmd == 'saveopts': - continue # don't save our own options! 
- - for opt, (src, val) in dist.get_option_dict(cmd).items(): - if src == "command line": - settings.setdefault(cmd, {})[opt] = val - - edit_config(self.filename, settings, self.dry_run) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/sdist.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/sdist.py deleted file mode 100644 index bcfae4d..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/sdist.py +++ /dev/null @@ -1,200 +0,0 @@ -from distutils import log -import distutils.command.sdist as orig -import os -import sys -import io -import contextlib - -from setuptools.extern import six - -from .py36compat import sdist_add_defaults - -import pkg_resources - -_default_revctrl = list - - -def walk_revctrl(dirname=''): - """Find all files under revision control""" - for ep in pkg_resources.iter_entry_points('setuptools.file_finders'): - for item in ep.load()(dirname): - yield item - - -class sdist(sdist_add_defaults, orig.sdist): - """Smart sdist that finds anything supported by revision control""" - - user_options = [ - ('formats=', None, - "formats for source distribution (comma-separated list)"), - ('keep-temp', 'k', - "keep the distribution tree around after creating " + - "archive file(s)"), - ('dist-dir=', 'd', - "directory to put the source distribution archive(s) in " - "[default: dist]"), - ] - - negative_opt = {} - - README_EXTENSIONS = ['', '.rst', '.txt', '.md'] - READMES = tuple('README{0}'.format(ext) for ext in README_EXTENSIONS) - - def run(self): - self.run_command('egg_info') - ei_cmd = self.get_finalized_command('egg_info') - self.filelist = ei_cmd.filelist - self.filelist.append(os.path.join(ei_cmd.egg_info, 'SOURCES.txt')) - self.check_readme() - - # Run sub commands - for cmd_name in self.get_sub_commands(): - self.run_command(cmd_name) - - self.make_distribution() - - dist_files = getattr(self.distribution, 'dist_files', []) - for file in self.archive_files: - data = ('sdist', '', file) - if data not in dist_files: - dist_files.append(data) - - def initialize_options(self): - orig.sdist.initialize_options(self) - - self._default_to_gztar() - - def _default_to_gztar(self): - # only needed on Python prior to 3.6. - if sys.version_info >= (3, 6, 0, 'beta', 1): - return - self.formats = ['gztar'] - - def make_distribution(self): - """ - Workaround for #516 - """ - with self._remove_os_link(): - orig.sdist.make_distribution(self) - - @staticmethod - @contextlib.contextmanager - def _remove_os_link(): - """ - In a context, remove and restore os.link if it exists - """ - - class NoValue: - pass - - orig_val = getattr(os, 'link', NoValue) - try: - del os.link - except Exception: - pass - try: - yield - finally: - if orig_val is not NoValue: - setattr(os, 'link', orig_val) - - def __read_template_hack(self): - # This grody hack closes the template file (MANIFEST.in) if an - # exception occurs during read_template. - # Doing so prevents an error when easy_install attempts to delete the - # file. - try: - orig.sdist.read_template(self) - except Exception: - _, _, tb = sys.exc_info() - tb.tb_next.tb_frame.f_locals['template'].close() - raise - - # Beginning with Python 2.7.2, 3.1.4, and 3.2.1, this leaky file handle - # has been fixed, so only override the method if we're using an earlier - # Python. 
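The `_remove_os_link()` context manager in the deleted sdist.py above works around the issue #516 it cites by deleting `os.link` for the duration of `make_distribution()`, forcing file copies instead of hard links. The save/delete/restore shape generalizes to any attribute; a small sketch under that assumption, with hypothetical names:

import contextlib

@contextlib.contextmanager
def hidden_attr(obj, name):
    sentinel = object()
    saved = getattr(obj, name, sentinel)
    try:
        try:
            delattr(obj, name)  # the attribute may not exist on this platform
        except AttributeError:
            pass
        yield
    finally:
        if saved is not sentinel:
            setattr(obj, name, saved)

`with hidden_attr(os, 'link'): ...` then mirrors what the deleted helper does.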
- has_leaky_handle = ( - sys.version_info < (2, 7, 2) - or (3, 0) <= sys.version_info < (3, 1, 4) - or (3, 2) <= sys.version_info < (3, 2, 1) - ) - if has_leaky_handle: - read_template = __read_template_hack - - def _add_defaults_python(self): - """getting python files""" - if self.distribution.has_pure_modules(): - build_py = self.get_finalized_command('build_py') - self.filelist.extend(build_py.get_source_files()) - # This functionality is incompatible with include_package_data, and - # will in fact create an infinite recursion if include_package_data - # is True. Use of include_package_data will imply that - # distutils-style automatic handling of package_data is disabled - if not self.distribution.include_package_data: - for _, src_dir, _, filenames in build_py.data_files: - self.filelist.extend([os.path.join(src_dir, filename) - for filename in filenames]) - - def _add_defaults_data_files(self): - try: - if six.PY2: - sdist_add_defaults._add_defaults_data_files(self) - else: - super()._add_defaults_data_files() - except TypeError: - log.warn("data_files contains unexpected objects") - - def check_readme(self): - for f in self.READMES: - if os.path.exists(f): - return - else: - self.warn( - "standard file not found: should have one of " + - ', '.join(self.READMES) - ) - - def make_release_tree(self, base_dir, files): - orig.sdist.make_release_tree(self, base_dir, files) - - # Save any egg_info command line options used to create this sdist - dest = os.path.join(base_dir, 'setup.cfg') - if hasattr(os, 'link') and os.path.exists(dest): - # unlink and re-copy, since it might be hard-linked, and - # we don't want to change the source version - os.unlink(dest) - self.copy_file('setup.cfg', dest) - - self.get_finalized_command('egg_info').save_version_info(dest) - - def _manifest_is_not_generated(self): - # check for special comment used in 2.7.1 and higher - if not os.path.isfile(self.manifest): - return False - - with io.open(self.manifest, 'rb') as fp: - first_line = fp.readline() - return (first_line != - '# file GENERATED by distutils, do NOT edit\n'.encode()) - - def read_manifest(self): - """Read the manifest file (named by 'self.manifest') and use it to - fill in 'self.filelist', the list of files to include in the source - distribution. - """ - log.info("reading manifest file '%s'", self.manifest) - manifest = open(self.manifest, 'rb') - for line in manifest: - # The manifest must contain UTF-8. See #303. 
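`read_manifest()` here is deliberately tolerant: as the lines that follow show, each manifest line is decoded as UTF-8 and undecodable ones are logged and skipped rather than aborting the build. The same tolerant-read shape in isolation (a sketch; the helper name is hypothetical):

def read_lines_utf8(path):
    lines = []
    with open(path, 'rb') as f:
        for raw in f:
            try:
                line = raw.decode('utf-8').strip()
            except UnicodeDecodeError:
                continue  # skip undecodable lines, as the deleted code does
            if line and not line.startswith('#'):
                lines.append(line)
    return lines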
- if six.PY3: - try: - line = line.decode('UTF-8') - except UnicodeDecodeError: - log.warn("%r not UTF-8 decodable -- skipping" % line) - continue - # ignore comments and blank lines - line = line.strip() - if line.startswith('#') or not line: - continue - self.filelist.append(line) - manifest.close() diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/setopt.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/setopt.py deleted file mode 100644 index 7e57cc0..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/setopt.py +++ /dev/null @@ -1,149 +0,0 @@ -from distutils.util import convert_path -from distutils import log -from distutils.errors import DistutilsOptionError -import distutils -import os - -from setuptools.extern.six.moves import configparser - -from setuptools import Command - -__all__ = ['config_file', 'edit_config', 'option_base', 'setopt'] - - -def config_file(kind="local"): - """Get the filename of the distutils, local, global, or per-user config - - `kind` must be one of "local", "global", or "user" - """ - if kind == 'local': - return 'setup.cfg' - if kind == 'global': - return os.path.join( - os.path.dirname(distutils.__file__), 'distutils.cfg' - ) - if kind == 'user': - dot = os.name == 'posix' and '.' or '' - return os.path.expanduser(convert_path("~/%spydistutils.cfg" % dot)) - raise ValueError( - "config_file() type must be 'local', 'global', or 'user'", kind - ) - - -def edit_config(filename, settings, dry_run=False): - """Edit a configuration file to include `settings` - - `settings` is a dictionary of dictionaries or ``None`` values, keyed by - command/section name. A ``None`` value means to delete the entire section, - while a dictionary lists settings to be changed or deleted in that section. - A setting of ``None`` means to delete that setting. 
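`edit_config()` is documented above and implemented just below as a thin layer over `configparser`: read the file, apply the settings dict (where `None` deletes a section or option), and write the result back. For orientation, the same round-trip with plain `configparser` (the file name and section here are hypothetical):

import configparser

cp = configparser.RawConfigParser()
cp.read(['setup.cfg'])
if not cp.has_section('bdist_wheel'):
    cp.add_section('bdist_wheel')
cp.set('bdist_wheel', 'universal', '1')
with open('setup.cfg', 'w') as f:
    cp.write(f)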
- """ - log.debug("Reading configuration from %s", filename) - opts = configparser.RawConfigParser() - opts.read([filename]) - for section, options in settings.items(): - if options is None: - log.info("Deleting section [%s] from %s", section, filename) - opts.remove_section(section) - else: - if not opts.has_section(section): - log.debug("Adding new section [%s] to %s", section, filename) - opts.add_section(section) - for option, value in options.items(): - if value is None: - log.debug( - "Deleting %s.%s from %s", - section, option, filename - ) - opts.remove_option(section, option) - if not opts.options(section): - log.info("Deleting empty [%s] section from %s", - section, filename) - opts.remove_section(section) - else: - log.debug( - "Setting %s.%s to %r in %s", - section, option, value, filename - ) - opts.set(section, option, value) - - log.info("Writing %s", filename) - if not dry_run: - with open(filename, 'w') as f: - opts.write(f) - - -class option_base(Command): - """Abstract base class for commands that mess with config files""" - - user_options = [ - ('global-config', 'g', - "save options to the site-wide distutils.cfg file"), - ('user-config', 'u', - "save options to the current user's pydistutils.cfg file"), - ('filename=', 'f', - "configuration file to use (default=setup.cfg)"), - ] - - boolean_options = [ - 'global-config', 'user-config', - ] - - def initialize_options(self): - self.global_config = None - self.user_config = None - self.filename = None - - def finalize_options(self): - filenames = [] - if self.global_config: - filenames.append(config_file('global')) - if self.user_config: - filenames.append(config_file('user')) - if self.filename is not None: - filenames.append(self.filename) - if not filenames: - filenames.append(config_file('local')) - if len(filenames) > 1: - raise DistutilsOptionError( - "Must specify only one configuration file option", - filenames - ) - self.filename, = filenames - - -class setopt(option_base): - """Save command-line options to a file""" - - description = "set an option in setup.cfg or another config file" - - user_options = [ - ('command=', 'c', 'command to set an option for'), - ('option=', 'o', 'option to set'), - ('set-value=', 's', 'value of the option'), - ('remove', 'r', 'remove (unset) the value'), - ] + option_base.user_options - - boolean_options = option_base.boolean_options + ['remove'] - - def initialize_options(self): - option_base.initialize_options(self) - self.command = None - self.option = None - self.set_value = None - self.remove = None - - def finalize_options(self): - option_base.finalize_options(self) - if self.command is None or self.option is None: - raise DistutilsOptionError("Must specify --command *and* --option") - if self.set_value is None and not self.remove: - raise DistutilsOptionError("Must specify --set-value or --remove") - - def run(self): - edit_config( - self.filename, { - self.command: {self.option.replace('-', '_'): self.set_value} - }, - self.dry_run - ) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/test.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/test.py deleted file mode 100644 index 51aee1f..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/test.py +++ /dev/null @@ -1,268 +0,0 @@ -import os -import operator -import sys -import contextlib -import itertools -import unittest -from distutils.errors import DistutilsError, DistutilsOptionError -from distutils import log -from unittest import TestLoader - -from 
setuptools.extern import six -from setuptools.extern.six.moves import map, filter - -from pkg_resources import (resource_listdir, resource_exists, normalize_path, - working_set, _namespace_packages, evaluate_marker, - add_activation_listener, require, EntryPoint) -from setuptools import Command - - -class ScanningLoader(TestLoader): - - def __init__(self): - TestLoader.__init__(self) - self._visited = set() - - def loadTestsFromModule(self, module, pattern=None): - """Return a suite of all tests cases contained in the given module - - If the module is a package, load tests from all the modules in it. - If the module has an ``additional_tests`` function, call it and add - the return value to the tests. - """ - if module in self._visited: - return None - self._visited.add(module) - - tests = [] - tests.append(TestLoader.loadTestsFromModule(self, module)) - - if hasattr(module, "additional_tests"): - tests.append(module.additional_tests()) - - if hasattr(module, '__path__'): - for file in resource_listdir(module.__name__, ''): - if file.endswith('.py') and file != '__init__.py': - submodule = module.__name__ + '.' + file[:-3] - else: - if resource_exists(module.__name__, file + '/__init__.py'): - submodule = module.__name__ + '.' + file - else: - continue - tests.append(self.loadTestsFromName(submodule)) - - if len(tests) != 1: - return self.suiteClass(tests) - else: - return tests[0] # don't create a nested suite for only one return - - -# adapted from jaraco.classes.properties:NonDataProperty -class NonDataProperty(object): - def __init__(self, fget): - self.fget = fget - - def __get__(self, obj, objtype=None): - if obj is None: - return self - return self.fget(obj) - - -class test(Command): - """Command to run unit tests after in-place build""" - - description = "run unit tests after in-place build" - - user_options = [ - ('test-module=', 'm', "Run 'test_suite' in specified module"), - ('test-suite=', 's', - "Run single test, case or suite (e.g. 'module.test_suite')"), - ('test-runner=', 'r', "Test runner to use"), - ] - - def initialize_options(self): - self.test_suite = None - self.test_module = None - self.test_loader = None - self.test_runner = None - - def finalize_options(self): - - if self.test_suite and self.test_module: - msg = "You may specify a module or a suite, but not both" - raise DistutilsOptionError(msg) - - if self.test_suite is None: - if self.test_module is None: - self.test_suite = self.distribution.test_suite - else: - self.test_suite = self.test_module + ".test_suite" - - if self.test_loader is None: - self.test_loader = getattr(self.distribution, 'test_loader', None) - if self.test_loader is None: - self.test_loader = "setuptools.command.test:ScanningLoader" - if self.test_runner is None: - self.test_runner = getattr(self.distribution, 'test_runner', None) - - @NonDataProperty - def test_args(self): - return list(self._test_args()) - - def _test_args(self): - if not self.test_suite and sys.version_info >= (2, 7): - yield 'discover' - if self.verbose: - yield '--verbose' - if self.test_suite: - yield self.test_suite - - def with_project_on_sys_path(self, func): - """ - Backward compatibility for project_on_sys_path context. 
- """ - with self.project_on_sys_path(): - func() - - @contextlib.contextmanager - def project_on_sys_path(self, include_dists=[]): - with_2to3 = six.PY3 and getattr(self.distribution, 'use_2to3', False) - - if with_2to3: - # If we run 2to3 we can not do this inplace: - - # Ensure metadata is up-to-date - self.reinitialize_command('build_py', inplace=0) - self.run_command('build_py') - bpy_cmd = self.get_finalized_command("build_py") - build_path = normalize_path(bpy_cmd.build_lib) - - # Build extensions - self.reinitialize_command('egg_info', egg_base=build_path) - self.run_command('egg_info') - - self.reinitialize_command('build_ext', inplace=0) - self.run_command('build_ext') - else: - # Without 2to3 inplace works fine: - self.run_command('egg_info') - - # Build extensions in-place - self.reinitialize_command('build_ext', inplace=1) - self.run_command('build_ext') - - ei_cmd = self.get_finalized_command("egg_info") - - old_path = sys.path[:] - old_modules = sys.modules.copy() - - try: - project_path = normalize_path(ei_cmd.egg_base) - sys.path.insert(0, project_path) - working_set.__init__() - add_activation_listener(lambda dist: dist.activate()) - require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version)) - with self.paths_on_pythonpath([project_path]): - yield - finally: - sys.path[:] = old_path - sys.modules.clear() - sys.modules.update(old_modules) - working_set.__init__() - - @staticmethod - @contextlib.contextmanager - def paths_on_pythonpath(paths): - """ - Add the indicated paths to the head of the PYTHONPATH environment - variable so that subprocesses will also see the packages at - these paths. - - Do this in a context that restores the value on exit. - """ - nothing = object() - orig_pythonpath = os.environ.get('PYTHONPATH', nothing) - current_pythonpath = os.environ.get('PYTHONPATH', '') - try: - prefix = os.pathsep.join(paths) - to_join = filter(None, [prefix, current_pythonpath]) - new_path = os.pathsep.join(to_join) - if new_path: - os.environ['PYTHONPATH'] = new_path - yield - finally: - if orig_pythonpath is nothing: - os.environ.pop('PYTHONPATH', None) - else: - os.environ['PYTHONPATH'] = orig_pythonpath - - @staticmethod - def install_dists(dist): - """ - Install the requirements indicated by self.distribution and - return an iterable of the dists that were built. - """ - ir_d = dist.fetch_build_eggs(dist.install_requires) - tr_d = dist.fetch_build_eggs(dist.tests_require or []) - er_d = dist.fetch_build_eggs( - v for k, v in dist.extras_require.items() - if k.startswith(':') and evaluate_marker(k[1:]) - ) - return itertools.chain(ir_d, tr_d, er_d) - - def run(self): - installed_dists = self.install_dists(self.distribution) - - cmd = ' '.join(self._argv) - if self.dry_run: - self.announce('skipping "%s" (dry run)' % cmd) - return - - self.announce('running "%s"' % cmd) - - paths = map(operator.attrgetter('location'), installed_dists) - with self.paths_on_pythonpath(paths): - with self.project_on_sys_path(): - self.run_tests() - - def run_tests(self): - # Purge modules under test from sys.modules. The test loader will - # re-import them from the build location. Required when 2to3 is used - # with namespace packages. - if six.PY3 and getattr(self.distribution, 'use_2to3', False): - module = self.test_suite.split('.')[0] - if module in _namespace_packages: - del_modules = [] - if module in sys.modules: - del_modules.append(module) - module += '.' 
- for name in sys.modules: - if name.startswith(module): - del_modules.append(name) - list(map(sys.modules.__delitem__, del_modules)) - - test = unittest.main( - None, None, self._argv, - testLoader=self._resolve_as_ep(self.test_loader), - testRunner=self._resolve_as_ep(self.test_runner), - exit=False, - ) - if not test.result.wasSuccessful(): - msg = 'Test failed: %s' % test.result - self.announce(msg, log.ERROR) - raise DistutilsError(msg) - - @property - def _argv(self): - return ['unittest'] + self.test_args - - @staticmethod - def _resolve_as_ep(val): - """ - Load the indicated attribute value, called, as a as if it were - specified as an entry point. - """ - if val is None: - return - parsed = EntryPoint.parse("x=" + val) - return parsed.resolve()() diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/upload.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/upload.py deleted file mode 100644 index a44173a..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/upload.py +++ /dev/null @@ -1,42 +0,0 @@ -import getpass -from distutils.command import upload as orig - - -class upload(orig.upload): - """ - Override default upload behavior to obtain password - in a variety of different ways. - """ - - def finalize_options(self): - orig.upload.finalize_options(self) - self.username = ( - self.username or - getpass.getuser() - ) - # Attempt to obtain password. Short circuit evaluation at the first - # sign of success. - self.password = ( - self.password or - self._load_password_from_keyring() or - self._prompt_for_password() - ) - - def _load_password_from_keyring(self): - """ - Attempt to load password from keyring. Suppress Exceptions. - """ - try: - keyring = __import__('keyring') - return keyring.get_password(self.repository, self.username) - except Exception: - pass - - def _prompt_for_password(self): - """ - Prompt for a password on the tty. Suppress Exceptions. - """ - try: - return getpass.getpass() - except (Exception, KeyboardInterrupt): - pass diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/upload_docs.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/command/upload_docs.py deleted file mode 100644 index 07aa564..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/command/upload_docs.py +++ /dev/null @@ -1,206 +0,0 @@ -# -*- coding: utf-8 -*- -"""upload_docs - -Implements a Distutils 'upload_docs' subcommand (upload documentation to -PyPI's pythonhosted.org). -""" - -from base64 import standard_b64encode -from distutils import log -from distutils.errors import DistutilsOptionError -import os -import socket -import zipfile -import tempfile -import shutil -import itertools -import functools - -from setuptools.extern import six -from setuptools.extern.six.moves import http_client, urllib - -from pkg_resources import iter_entry_points -from .upload import upload - - -def _encode(s): - errors = 'surrogateescape' if six.PY3 else 'strict' - return s.encode('utf-8', errors) - - -class upload_docs(upload): - # override the default repository as upload_docs isn't - # supported by Warehouse (and won't be). 
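The deleted upload command above obtains a password by short-circuiting through three sources: the configured value, the `keyring` package (imported lazily so it remains optional), and finally an interactive prompt. That lookup chain, sketched standalone with hypothetical names:

import getpass

def obtain_password(repository, username, configured=None):
    if configured:
        return configured
    try:
        import keyring  # optional dependency, as in the deleted command
        password = keyring.get_password(repository, username)
        if password:
            return password
    except Exception:
        pass
    try:
        return getpass.getpass()
    except (Exception, KeyboardInterrupt):
        return None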
- DEFAULT_REPOSITORY = 'https://pypi.python.org/pypi/' - - description = 'Upload documentation to PyPI' - - user_options = [ - ('repository=', 'r', - "url of repository [default: %s]" % upload.DEFAULT_REPOSITORY), - ('show-response', None, - 'display full response text from server'), - ('upload-dir=', None, 'directory to upload'), - ] - boolean_options = upload.boolean_options - - def has_sphinx(self): - if self.upload_dir is None: - for ep in iter_entry_points('distutils.commands', 'build_sphinx'): - return True - - sub_commands = [('build_sphinx', has_sphinx)] - - def initialize_options(self): - upload.initialize_options(self) - self.upload_dir = None - self.target_dir = None - - def finalize_options(self): - upload.finalize_options(self) - if self.upload_dir is None: - if self.has_sphinx(): - build_sphinx = self.get_finalized_command('build_sphinx') - self.target_dir = build_sphinx.builder_target_dir - else: - build = self.get_finalized_command('build') - self.target_dir = os.path.join(build.build_base, 'docs') - else: - self.ensure_dirname('upload_dir') - self.target_dir = self.upload_dir - if 'pypi.python.org' in self.repository: - log.warn("Upload_docs command is deprecated. Use RTD instead.") - self.announce('Using upload directory %s' % self.target_dir) - - def create_zipfile(self, filename): - zip_file = zipfile.ZipFile(filename, "w") - try: - self.mkpath(self.target_dir) # just in case - for root, dirs, files in os.walk(self.target_dir): - if root == self.target_dir and not files: - tmpl = "no files found in upload directory '%s'" - raise DistutilsOptionError(tmpl % self.target_dir) - for name in files: - full = os.path.join(root, name) - relative = root[len(self.target_dir):].lstrip(os.path.sep) - dest = os.path.join(relative, name) - zip_file.write(full, dest) - finally: - zip_file.close() - - def run(self): - # Run sub commands - for cmd_name in self.get_sub_commands(): - self.run_command(cmd_name) - - tmp_dir = tempfile.mkdtemp() - name = self.distribution.metadata.get_name() - zip_file = os.path.join(tmp_dir, "%s.zip" % name) - try: - self.create_zipfile(zip_file) - self.upload_file(zip_file) - finally: - shutil.rmtree(tmp_dir) - - @staticmethod - def _build_part(item, sep_boundary): - key, values = item - title = '\nContent-Disposition: form-data; name="%s"' % key - # handle multiple entries for the same name - if not isinstance(values, list): - values = [values] - for value in values: - if isinstance(value, tuple): - title += '; filename="%s"' % value[0] - value = value[1] - else: - value = _encode(value) - yield sep_boundary - yield _encode(title) - yield b"\n\n" - yield value - if value and value[-1:] == b'\r': - yield b'\n' # write an extra newline (lurve Macs) - - @classmethod - def _build_multipart(cls, data): - """ - Build up the MIME payload for the POST data - """ - boundary = b'--------------GHSKFJDLGDS7543FJKLFHRE75642756743254' - sep_boundary = b'\n--' + boundary - end_boundary = sep_boundary + b'--' - end_items = end_boundary, b"\n", - builder = functools.partial( - cls._build_part, - sep_boundary=sep_boundary, - ) - part_groups = map(builder, data.items()) - parts = itertools.chain.from_iterable(part_groups) - body_items = itertools.chain(parts, end_items) - content_type = 'multipart/form-data; boundary=%s' % boundary.decode('ascii') - return b''.join(body_items), content_type - - def upload_file(self, filename): - with open(filename, 'rb') as f: - content = f.read() - meta = self.distribution.metadata - data = { - ':action': 'doc_upload', - 'name': 
meta.get_name(), - 'content': (os.path.basename(filename), content), - } - # set up the authentication - credentials = _encode(self.username + ':' + self.password) - credentials = standard_b64encode(credentials) - if six.PY3: - credentials = credentials.decode('ascii') - auth = "Basic " + credentials - - body, ct = self._build_multipart(data) - - msg = "Submitting documentation to %s" % (self.repository) - self.announce(msg, log.INFO) - - # build the Request - # We can't use urllib2 since we need to send the Basic - # auth right with the first request - schema, netloc, url, params, query, fragments = \ - urllib.parse.urlparse(self.repository) - assert not params and not query and not fragments - if schema == 'http': - conn = http_client.HTTPConnection(netloc) - elif schema == 'https': - conn = http_client.HTTPSConnection(netloc) - else: - raise AssertionError("unsupported schema " + schema) - - data = '' - try: - conn.connect() - conn.putrequest("POST", url) - content_type = ct - conn.putheader('Content-type', content_type) - conn.putheader('Content-length', str(len(body))) - conn.putheader('Authorization', auth) - conn.endheaders() - conn.send(body) - except socket.error as e: - self.announce(str(e), log.ERROR) - return - - r = conn.getresponse() - if r.status == 200: - msg = 'Server response (%s): %s' % (r.status, r.reason) - self.announce(msg, log.INFO) - elif r.status == 301: - location = r.getheader('Location') - if location is None: - location = 'https://pythonhosted.org/%s/' % meta.get_name() - msg = 'Upload successful. Visit %s' % location - self.announce(msg, log.INFO) - else: - msg = 'Upload failed (%s): %s' % (r.status, r.reason) - self.announce(msg, log.ERROR) - if self.show_response: - print('-' * 75, r.read(), '-' * 75) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/config.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/config.py deleted file mode 100644 index 8eddcae..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/config.py +++ /dev/null @@ -1,556 +0,0 @@ -from __future__ import absolute_import, unicode_literals -import io -import os -import sys -from collections import defaultdict -from functools import partial -from importlib import import_module - -from distutils.errors import DistutilsOptionError, DistutilsFileError -from setuptools.extern.six import string_types - - -def read_configuration( - filepath, find_others=False, ignore_option_errors=False): - """Read given configuration file and returns options from it as a dict. - - :param str|unicode filepath: Path to configuration file - to get options from. - - :param bool find_others: Whether to search for other configuration files - which could be on in various places. - - :param bool ignore_option_errors: Whether to silently ignore - options, values of which could not be resolved (e.g. due to exceptions - in directives such as file:, attr:, etc.). - If False exceptions are propagated as expected. - - :rtype: dict - """ - from setuptools.dist import Distribution, _Distribution - - filepath = os.path.abspath(filepath) - - if not os.path.isfile(filepath): - raise DistutilsFileError( - 'Configuration file %s does not exist.' 
% filepath) - - current_directory = os.getcwd() - os.chdir(os.path.dirname(filepath)) - - try: - dist = Distribution() - - filenames = dist.find_config_files() if find_others else [] - if filepath not in filenames: - filenames.append(filepath) - - _Distribution.parse_config_files(dist, filenames=filenames) - - handlers = parse_configuration( - dist, dist.command_options, - ignore_option_errors=ignore_option_errors) - - finally: - os.chdir(current_directory) - - return configuration_to_dict(handlers) - - -def configuration_to_dict(handlers): - """Returns configuration data gathered by given handlers as a dict. - - :param list[ConfigHandler] handlers: Handlers list, - usually from parse_configuration() - - :rtype: dict - """ - config_dict = defaultdict(dict) - - for handler in handlers: - - obj_alias = handler.section_prefix - target_obj = handler.target_obj - - for option in handler.set_options: - getter = getattr(target_obj, 'get_%s' % option, None) - - if getter is None: - value = getattr(target_obj, option) - - else: - value = getter() - - config_dict[obj_alias][option] = value - - return config_dict - - -def parse_configuration( - distribution, command_options, ignore_option_errors=False): - """Performs additional parsing of configuration options - for a distribution. - - Returns a list of used option handlers. - - :param Distribution distribution: - :param dict command_options: - :param bool ignore_option_errors: Whether to silently ignore - options, values of which could not be resolved (e.g. due to exceptions - in directives such as file:, attr:, etc.). - If False exceptions are propagated as expected. - :rtype: list - """ - meta = ConfigMetadataHandler( - distribution.metadata, command_options, ignore_option_errors) - meta.parse() - - options = ConfigOptionsHandler( - distribution, command_options, ignore_option_errors) - options.parse() - - return meta, options - - -class ConfigHandler(object): - """Handles metadata supplied in configuration files.""" - - section_prefix = None - """Prefix for config sections handled by this handler. - Must be provided by class heirs. - - """ - - aliases = {} - """Options aliases. - For compatibility with various packages. E.g.: d2to1 and pbr. - Note: `-` in keys is replaced with `_` by config parser. - - """ - - def __init__(self, target_obj, options, ignore_option_errors=False): - sections = {} - - section_prefix = self.section_prefix - for section_name, section_options in options.items(): - if not section_name.startswith(section_prefix): - continue - - section_name = section_name.replace(section_prefix, '').strip('.') - sections[section_name] = section_options - - self.ignore_option_errors = ignore_option_errors - self.target_obj = target_obj - self.sections = sections - self.set_options = [] - - @property - def parsers(self): - """Metadata item name to parser function mapping.""" - raise NotImplementedError( - '%s must provide .parsers property' % self.__class__.__name__) - - def __setitem__(self, option_name, value): - unknown = tuple() - target_obj = self.target_obj - - # Translate alias into real name. - option_name = self.aliases.get(option_name, option_name) - - current_value = getattr(target_obj, option_name, unknown) - - if current_value is unknown: - raise KeyError(option_name) - - if current_value: - # Already inhabited. Skipping. 
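For context, `read_configuration()` above appears to be the public entry point of this module in setuptools of this vintage: it chdirs next to the given file, lets `Distribution` parse it, and flattens the handler results into a plain dict. A usage sketch, assuming a `setup.cfg` with a `[metadata]` section exists in the current directory:

from setuptools.config import read_configuration

conf = read_configuration('setup.cfg')
print(conf['metadata'].get('name'))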
- return - - skip_option = False - parser = self.parsers.get(option_name) - if parser: - try: - value = parser(value) - - except Exception: - skip_option = True - if not self.ignore_option_errors: - raise - - if skip_option: - return - - setter = getattr(target_obj, 'set_%s' % option_name, None) - if setter is None: - setattr(target_obj, option_name, value) - else: - setter(value) - - self.set_options.append(option_name) - - @classmethod - def _parse_list(cls, value, separator=','): - """Represents value as a list. - - Value is split either by separator (defaults to comma) or by lines. - - :param value: - :param separator: List items separator character. - :rtype: list - """ - if isinstance(value, list): # _get_parser_compound case - return value - - if '\n' in value: - value = value.splitlines() - else: - value = value.split(separator) - - return [chunk.strip() for chunk in value if chunk.strip()] - - @classmethod - def _parse_dict(cls, value): - """Represents value as a dict. - - :param value: - :rtype: dict - """ - separator = '=' - result = {} - for line in cls._parse_list(value): - key, sep, val = line.partition(separator) - if sep != separator: - raise DistutilsOptionError( - 'Unable to parse option value to dict: %s' % value) - result[key.strip()] = val.strip() - - return result - - @classmethod - def _parse_bool(cls, value): - """Represents value as boolean. - - :param value: - :rtype: bool - """ - value = value.lower() - return value in ('1', 'true', 'yes') - - @classmethod - def _parse_file(cls, value): - """Represents value as a string, allowing including text - from nearest files using `file:` directive. - - Directive is sandboxed and won't reach anything outside - directory with setup.py. - - Examples: - file: LICENSE - file: README.rst, CHANGELOG.md, src/file.txt - - :param str value: - :rtype: str - """ - include_directive = 'file:' - - if not isinstance(value, string_types): - return value - - if not value.startswith(include_directive): - return value - - spec = value[len(include_directive):] - filepaths = (os.path.abspath(path.strip()) for path in spec.split(',')) - return '\n'.join( - cls._read_file(path) - for path in filepaths - if (cls._assert_local(path) or True) - and os.path.isfile(path) - ) - - @staticmethod - def _assert_local(filepath): - if not filepath.startswith(os.getcwd()): - raise DistutilsOptionError( - '`file:` directive can not access %s' % filepath) - - @staticmethod - def _read_file(filepath): - with io.open(filepath, encoding='utf-8') as f: - return f.read() - - @classmethod - def _parse_attr(cls, value): - """Represents value as a module attribute. - - Examples: - attr: package.attr - attr: package.module.attr - - :param str value: - :rtype: str - """ - attr_directive = 'attr:' - if not value.startswith(attr_directive): - return value - - attrs_path = value.replace(attr_directive, '').strip().split('.') - attr_name = attrs_path.pop() - - module_name = '.'.join(attrs_path) - module_name = module_name or '__init__' - - sys.path.insert(0, os.getcwd()) - try: - module = import_module(module_name) - value = getattr(module, attr_name) - - finally: - sys.path = sys.path[1:] - - return value - - @classmethod - def _get_parser_compound(cls, *parse_methods): - """Returns parser function to represents value as a list. - - Parses a value applying given methods one after another. 
- - :param parse_methods: - :rtype: callable - """ - def parse(value): - parsed = value - - for method in parse_methods: - parsed = method(parsed) - - return parsed - - return parse - - @classmethod - def _parse_section_to_dict(cls, section_options, values_parser=None): - """Parses section options into a dictionary. - - Optionally applies a given parser to values. - - :param dict section_options: - :param callable values_parser: - :rtype: dict - """ - value = {} - values_parser = values_parser or (lambda val: val) - for key, (_, val) in section_options.items(): - value[key] = values_parser(val) - return value - - def parse_section(self, section_options): - """Parses configuration file section. - - :param dict section_options: - """ - for (name, (_, value)) in section_options.items(): - try: - self[name] = value - - except KeyError: - pass # Keep silent for a new option may appear anytime. - - def parse(self): - """Parses configuration file items from one - or more related sections. - - """ - for section_name, section_options in self.sections.items(): - - method_postfix = '' - if section_name: # [section.option] variant - method_postfix = '_%s' % section_name - - section_parser_method = getattr( - self, - # Dots in section names are tranlsated into dunderscores. - ('parse_section%s' % method_postfix).replace('.', '__'), - None) - - if section_parser_method is None: - raise DistutilsOptionError( - 'Unsupported distribution option section: [%s.%s]' % ( - self.section_prefix, section_name)) - - section_parser_method(section_options) - - -class ConfigMetadataHandler(ConfigHandler): - - section_prefix = 'metadata' - - aliases = { - 'home_page': 'url', - 'summary': 'description', - 'classifier': 'classifiers', - 'platform': 'platforms', - } - - strict_mode = False - """We need to keep it loose, to be partially compatible with - `pbr` and `d2to1` packages which also uses `metadata` section. - - """ - - @property - def parsers(self): - """Metadata item name to parser function mapping.""" - parse_list = self._parse_list - parse_file = self._parse_file - parse_dict = self._parse_dict - - return { - 'platforms': parse_list, - 'keywords': parse_list, - 'provides': parse_list, - 'requires': parse_list, - 'obsoletes': parse_list, - 'classifiers': self._get_parser_compound(parse_file, parse_list), - 'license': parse_file, - 'description': parse_file, - 'long_description': parse_file, - 'version': self._parse_version, - 'project_urls': parse_dict, - } - - def _parse_version(self, value): - """Parses `version` option value. 
- - :param value: - :rtype: str - - """ - version = self._parse_attr(value) - - if callable(version): - version = version() - - if not isinstance(version, string_types): - if hasattr(version, '__iter__'): - version = '.'.join(map(str, version)) - else: - version = '%s' % version - - return version - - -class ConfigOptionsHandler(ConfigHandler): - - section_prefix = 'options' - - @property - def parsers(self): - """Metadata item name to parser function mapping.""" - parse_list = self._parse_list - parse_list_semicolon = partial(self._parse_list, separator=';') - parse_bool = self._parse_bool - parse_dict = self._parse_dict - - return { - 'zip_safe': parse_bool, - 'use_2to3': parse_bool, - 'include_package_data': parse_bool, - 'package_dir': parse_dict, - 'use_2to3_fixers': parse_list, - 'use_2to3_exclude_fixers': parse_list, - 'convert_2to3_doctests': parse_list, - 'scripts': parse_list, - 'eager_resources': parse_list, - 'dependency_links': parse_list, - 'namespace_packages': parse_list, - 'install_requires': parse_list_semicolon, - 'setup_requires': parse_list_semicolon, - 'tests_require': parse_list_semicolon, - 'packages': self._parse_packages, - 'entry_points': self._parse_file, - 'py_modules': parse_list, - } - - def _parse_packages(self, value): - """Parses `packages` option value. - - :param value: - :rtype: list - """ - find_directive = 'find:' - - if not value.startswith(find_directive): - return self._parse_list(value) - - # Read function arguments from a dedicated section. - find_kwargs = self.parse_section_packages__find( - self.sections.get('packages.find', {})) - - from setuptools import find_packages - - return find_packages(**find_kwargs) - - def parse_section_packages__find(self, section_options): - """Parses `packages.find` configuration file section. - - To be used in conjunction with _parse_packages(). - - :param dict section_options: - """ - section_data = self._parse_section_to_dict( - section_options, self._parse_list) - - valid_keys = ['where', 'include', 'exclude'] - - find_kwargs = dict( - [(k, v) for k, v in section_data.items() if k in valid_keys and v]) - - where = find_kwargs.get('where') - if where is not None: - find_kwargs['where'] = where[0] # cast list to single val - - return find_kwargs - - def parse_section_entry_points(self, section_options): - """Parses `entry_points` configuration file section. - - :param dict section_options: - """ - parsed = self._parse_section_to_dict(section_options, self._parse_list) - self['entry_points'] = parsed - - def _parse_package_data(self, section_options): - parsed = self._parse_section_to_dict(section_options, self._parse_list) - - root = parsed.get('*') - if root: - parsed[''] = root - del parsed['*'] - - return parsed - - def parse_section_package_data(self, section_options): - """Parses `package_data` configuration file section. - - :param dict section_options: - """ - self['package_data'] = self._parse_package_data(section_options) - - def parse_section_exclude_package_data(self, section_options): - """Parses `exclude_package_data` configuration file section. - - :param dict section_options: - """ - self['exclude_package_data'] = self._parse_package_data( - section_options) - - def parse_section_extras_require(self, section_options): - """Parses `extras_require` configuration file section. 
- - :param dict section_options: - """ - parse_list = partial(self._parse_list, separator=';') - self['extras_require'] = self._parse_section_to_dict( - section_options, parse_list) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/dep_util.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/dep_util.py deleted file mode 100644 index 2931c13..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/dep_util.py +++ /dev/null @@ -1,23 +0,0 @@ -from distutils.dep_util import newer_group - -# yes, this is was almost entirely copy-pasted from -# 'newer_pairwise()', this is just another convenience -# function. -def newer_pairwise_group(sources_groups, targets): - """Walk both arguments in parallel, testing if each source group is newer - than its corresponding target. Returns a pair of lists (sources_groups, - targets) where sources is newer than target, according to the semantics - of 'newer_group()'. - """ - if len(sources_groups) != len(targets): - raise ValueError("'sources_group' and 'targets' must be the same length") - - # build a pair of lists (sources_groups, targets) where source is newer - n_sources = [] - n_targets = [] - for i in range(len(sources_groups)): - if newer_group(sources_groups[i], targets[i]): - n_sources.append(sources_groups[i]) - n_targets.append(targets[i]) - - return n_sources, n_targets diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/depends.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/depends.py deleted file mode 100644 index 45e7052..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/depends.py +++ /dev/null @@ -1,186 +0,0 @@ -import sys -import imp -import marshal -from distutils.version import StrictVersion -from imp import PKG_DIRECTORY, PY_COMPILED, PY_SOURCE, PY_FROZEN - -from .py33compat import Bytecode - - -__all__ = [ - 'Require', 'find_module', 'get_module_constant', 'extract_constant' -] - - -class Require: - """A prerequisite to building or installing a distribution""" - - def __init__(self, name, requested_version, module, homepage='', - attribute=None, format=None): - - if format is None and requested_version is not None: - format = StrictVersion - - if format is not None: - requested_version = format(requested_version) - if attribute is None: - attribute = '__version__' - - self.__dict__.update(locals()) - del self.self - - def full_name(self): - """Return full package/distribution name, w/version""" - if self.requested_version is not None: - return '%s-%s' % (self.name, self.requested_version) - return self.name - - def version_ok(self, version): - """Is 'version' sufficiently up-to-date?""" - return self.attribute is None or self.format is None or \ - str(version) != "unknown" and version >= self.requested_version - - def get_version(self, paths=None, default="unknown"): - """Get version number of installed module, 'None', or 'default' - - Search 'paths' for module. If not found, return 'None'. If found, - return the extracted version attribute, or 'default' if no version - attribute was specified, or the value cannot be determined without - importing the module. The version is formatted according to the - requirement's version format (if any), unless it is 'None' or the - supplied 'default'. 
- """ - - if self.attribute is None: - try: - f, p, i = find_module(self.module, paths) - if f: - f.close() - return default - except ImportError: - return None - - v = get_module_constant(self.module, self.attribute, default, paths) - - if v is not None and v is not default and self.format is not None: - return self.format(v) - - return v - - def is_present(self, paths=None): - """Return true if dependency is present on 'paths'""" - return self.get_version(paths) is not None - - def is_current(self, paths=None): - """Return true if dependency is present and up-to-date on 'paths'""" - version = self.get_version(paths) - if version is None: - return False - return self.version_ok(version) - - -def find_module(module, paths=None): - """Just like 'imp.find_module()', but with package support""" - - parts = module.split('.') - - while parts: - part = parts.pop(0) - f, path, (suffix, mode, kind) = info = imp.find_module(part, paths) - - if kind == PKG_DIRECTORY: - parts = parts or ['__init__'] - paths = [path] - - elif parts: - raise ImportError("Can't find %r in %s" % (parts, module)) - - return info - - -def get_module_constant(module, symbol, default=-1, paths=None): - """Find 'module' by searching 'paths', and extract 'symbol' - - Return 'None' if 'module' does not exist on 'paths', or it does not define - 'symbol'. If the module defines 'symbol' as a constant, return the - constant. Otherwise, return 'default'.""" - - try: - f, path, (suffix, mode, kind) = find_module(module, paths) - except ImportError: - # Module doesn't exist - return None - - try: - if kind == PY_COMPILED: - f.read(8) # skip magic & date - code = marshal.load(f) - elif kind == PY_FROZEN: - code = imp.get_frozen_object(module) - elif kind == PY_SOURCE: - code = compile(f.read(), path, 'exec') - else: - # Not something we can parse; we'll have to import it. :( - if module not in sys.modules: - imp.load_module(module, f, path, (suffix, mode, kind)) - return getattr(sys.modules[module], symbol, None) - - finally: - if f: - f.close() - - return extract_constant(code, symbol, default) - - -def extract_constant(code, symbol, default=-1): - """Extract the constant value of 'symbol' from 'code' - - If the name 'symbol' is bound to a constant value by the Python code - object 'code', return that value. If 'symbol' is bound to an expression, - return 'default'. Otherwise, return 'None'. - - Return value is based on the first assignment to 'symbol'. 'symbol' must - be a global, or at least a non-"fast" local in the code block. That is, - only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol' - must be present in 'code.co_names'. - """ - if symbol not in code.co_names: - # name's not there, can't possibly be an assignment - return None - - name_idx = list(code.co_names).index(symbol) - - STORE_NAME = 90 - STORE_GLOBAL = 97 - LOAD_CONST = 100 - - const = default - - for byte_code in Bytecode(code): - op = byte_code.opcode - arg = byte_code.arg - - if op == LOAD_CONST: - const = code.co_consts[arg] - elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL): - return const - else: - const = default - - -def _update_globals(): - """ - Patch the globals to remove the objects not available on some platforms. - - XXX it'd be better to test assertions about bytecode instead. 
- """ - - if not sys.platform.startswith('java') and sys.platform != 'cli': - return - incompatible = 'extract_constant', 'get_module_constant' - for name in incompatible: - del globals()[name] - __all__.remove(name) - - -_update_globals() diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/dist.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/dist.py deleted file mode 100644 index 5dc696f..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/dist.py +++ /dev/null @@ -1,1070 +0,0 @@ -# -*- coding: utf-8 -*- -__all__ = ['Distribution'] - -import re -import os -import warnings -import numbers -import distutils.log -import distutils.core -import distutils.cmd -import distutils.dist -import itertools -from collections import defaultdict -from distutils.errors import ( - DistutilsOptionError, DistutilsPlatformError, DistutilsSetupError, -) -from distutils.util import rfc822_escape -from distutils.version import StrictVersion - -from setuptools.extern import six -from setuptools.extern import packaging -from setuptools.extern.six.moves import map, filter, filterfalse - -from setuptools.depends import Require -from setuptools import windows_support -from setuptools.monkey import get_unpatched -from setuptools.config import parse_configuration -import pkg_resources -from .py36compat import Distribution_parse_config_files - -__import__('setuptools.extern.packaging.specifiers') -__import__('setuptools.extern.packaging.version') - - -def _get_unpatched(cls): - warnings.warn("Do not call this function", DeprecationWarning) - return get_unpatched(cls) - - -def get_metadata_version(dist_md): - if dist_md.long_description_content_type or dist_md.provides_extras: - return StrictVersion('2.1') - elif (dist_md.maintainer is not None or - dist_md.maintainer_email is not None or - getattr(dist_md, 'python_requires', None) is not None): - return StrictVersion('1.2') - elif (dist_md.provides or dist_md.requires or dist_md.obsoletes or - dist_md.classifiers or dist_md.download_url): - return StrictVersion('1.1') - - return StrictVersion('1.0') - - -# Based on Python 3.5 version -def write_pkg_file(self, file): - """Write the PKG-INFO format data to a file object. 
- """ - version = get_metadata_version(self) - - file.write('Metadata-Version: %s\n' % version) - file.write('Name: %s\n' % self.get_name()) - file.write('Version: %s\n' % self.get_version()) - file.write('Summary: %s\n' % self.get_description()) - file.write('Home-page: %s\n' % self.get_url()) - - if version < StrictVersion('1.2'): - file.write('Author: %s\n' % self.get_contact()) - file.write('Author-email: %s\n' % self.get_contact_email()) - else: - optional_fields = ( - ('Author', 'author'), - ('Author-email', 'author_email'), - ('Maintainer', 'maintainer'), - ('Maintainer-email', 'maintainer_email'), - ) - - for field, attr in optional_fields: - attr_val = getattr(self, attr) - if six.PY2: - attr_val = self._encode_field(attr_val) - - if attr_val is not None: - file.write('%s: %s\n' % (field, attr_val)) - - file.write('License: %s\n' % self.get_license()) - if self.download_url: - file.write('Download-URL: %s\n' % self.download_url) - for project_url in self.project_urls.items(): - file.write('Project-URL: %s, %s\n' % project_url) - - long_desc = rfc822_escape(self.get_long_description()) - file.write('Description: %s\n' % long_desc) - - keywords = ','.join(self.get_keywords()) - if keywords: - file.write('Keywords: %s\n' % keywords) - - if version >= StrictVersion('1.2'): - for platform in self.get_platforms(): - file.write('Platform: %s\n' % platform) - else: - self._write_list(file, 'Platform', self.get_platforms()) - - self._write_list(file, 'Classifier', self.get_classifiers()) - - # PEP 314 - self._write_list(file, 'Requires', self.get_requires()) - self._write_list(file, 'Provides', self.get_provides()) - self._write_list(file, 'Obsoletes', self.get_obsoletes()) - - # Setuptools specific for PEP 345 - if hasattr(self, 'python_requires'): - file.write('Requires-Python: %s\n' % self.python_requires) - - # PEP 566 - if self.long_description_content_type: - file.write( - 'Description-Content-Type: %s\n' % - self.long_description_content_type - ) - if self.provides_extras: - for extra in sorted(self.provides_extras): - file.write('Provides-Extra: %s\n' % extra) - - -# from Python 3.4 -def write_pkg_info(self, base_dir): - """Write the PKG-INFO file into the release tree. 
- """ - with open(os.path.join(base_dir, 'PKG-INFO'), 'w', - encoding='UTF-8') as pkg_info: - self.write_pkg_file(pkg_info) - - -sequence = tuple, list - - -def check_importable(dist, attr, value): - try: - ep = pkg_resources.EntryPoint.parse('x=' + value) - assert not ep.extras - except (TypeError, ValueError, AttributeError, AssertionError): - raise DistutilsSetupError( - "%r must be importable 'module:attrs' string (got %r)" - % (attr, value) - ) - - -def assert_string_list(dist, attr, value): - """Verify that value is a string list or None""" - try: - assert ''.join(value) != value - except (TypeError, ValueError, AttributeError, AssertionError): - raise DistutilsSetupError( - "%r must be a list of strings (got %r)" % (attr, value) - ) - - -def check_nsp(dist, attr, value): - """Verify that namespace packages are valid""" - ns_packages = value - assert_string_list(dist, attr, ns_packages) - for nsp in ns_packages: - if not dist.has_contents_for(nsp): - raise DistutilsSetupError( - "Distribution contains no modules or packages for " + - "namespace package %r" % nsp - ) - parent, sep, child = nsp.rpartition('.') - if parent and parent not in ns_packages: - distutils.log.warn( - "WARNING: %r is declared as a package namespace, but %r" - " is not: please correct this in setup.py", nsp, parent - ) - - -def check_extras(dist, attr, value): - """Verify that extras_require mapping is valid""" - try: - list(itertools.starmap(_check_extra, value.items())) - except (TypeError, ValueError, AttributeError): - raise DistutilsSetupError( - "'extras_require' must be a dictionary whose values are " - "strings or lists of strings containing valid project/version " - "requirement specifiers." - ) - - -def _check_extra(extra, reqs): - name, sep, marker = extra.partition(':') - if marker and pkg_resources.invalid_marker(marker): - raise DistutilsSetupError("Invalid environment marker: " + marker) - list(pkg_resources.parse_requirements(reqs)) - - -def assert_bool(dist, attr, value): - """Verify that value is True, False, 0, or 1""" - if bool(value) != value: - tmpl = "{attr!r} must be a boolean value (got {value!r})" - raise DistutilsSetupError(tmpl.format(attr=attr, value=value)) - - -def check_requirements(dist, attr, value): - """Verify that install_requires is a valid requirements list""" - try: - list(pkg_resources.parse_requirements(value)) - if isinstance(value, (dict, set)): - raise TypeError("Unordered types are not allowed") - except (TypeError, ValueError) as error: - tmpl = ( - "{attr!r} must be a string or list of strings " - "containing valid project/version requirement specifiers; {error}" - ) - raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) - - -def check_specifier(dist, attr, value): - """Verify that value is a valid version specifier""" - try: - packaging.specifiers.SpecifierSet(value) - except packaging.specifiers.InvalidSpecifier as error: - tmpl = ( - "{attr!r} must be a string " - "containing valid version specifiers; {error}" - ) - raise DistutilsSetupError(tmpl.format(attr=attr, error=error)) - - -def check_entry_points(dist, attr, value): - """Verify that entry_points map is parseable""" - try: - pkg_resources.EntryPoint.parse_map(value) - except ValueError as e: - raise DistutilsSetupError(e) - - -def check_test_suite(dist, attr, value): - if not isinstance(value, six.string_types): - raise DistutilsSetupError("test_suite must be a string") - - -def check_package_data(dist, attr, value): - """Verify that value is a dictionary of package names to glob lists""" - if 
isinstance(value, dict): - for k, v in value.items(): - if not isinstance(k, str): - break - try: - iter(v) - except TypeError: - break - else: - return - raise DistutilsSetupError( - attr + " must be a dictionary mapping package names to lists of " - "wildcard patterns" - ) - - -def check_packages(dist, attr, value): - for pkgname in value: - if not re.match(r'\w+(\.\w+)*', pkgname): - distutils.log.warn( - "WARNING: %r not a valid package name; please use only " - ".-separated package names in setup.py", pkgname - ) - - -_Distribution = get_unpatched(distutils.core.Distribution) - - -class Distribution(Distribution_parse_config_files, _Distribution): - """Distribution with support for features, tests, and package data - - This is an enhanced version of 'distutils.dist.Distribution' that - effectively adds the following new optional keyword arguments to 'setup()': - - 'install_requires' -- a string or sequence of strings specifying project - versions that the distribution requires when installed, in the format - used by 'pkg_resources.require()'. They will be installed - automatically when the package is installed. If you wish to use - packages that are not available in PyPI, or want to give your users an - alternate download location, you can add a 'find_links' option to the - '[easy_install]' section of your project's 'setup.cfg' file, and then - setuptools will scan the listed web pages for links that satisfy the - requirements. - - 'extras_require' -- a dictionary mapping names of optional "extras" to the - additional requirement(s) that using those extras incurs. For example, - this:: - - extras_require = dict(reST = ["docutils>=0.3", "reSTedit"]) - - indicates that the distribution can optionally provide an extra - capability called "reST", but it can only be used if docutils and - reSTedit are installed. If the user installs your package using - EasyInstall and requests one of your extras, the corresponding - additional requirements will be installed if needed. - - 'features' **deprecated** -- a dictionary mapping option names to - 'setuptools.Feature' - objects. Features are a portion of the distribution that can be - included or excluded based on user options, inter-feature dependencies, - and availability on the current system. Excluded features are omitted - from all setup commands, including source and binary distributions, so - you can create multiple distributions from the same source tree. - Feature names should be valid Python identifiers, except that they may - contain the '-' (minus) sign. Features can be included or excluded - via the command line options '--with-X' and '--without-X', where 'X' is - the name of the feature. Whether a feature is included by default, and - whether you are allowed to control this from the command line, is - determined by the Feature object. See the 'Feature' class for more - information. - - 'test_suite' -- the name of a test suite to run for the 'test' command. - If the user runs 'python setup.py test', the package will be installed, - and the named test suite will be run. The format is the same as - would be used on a 'unittest.py' command line. That is, it is the - dotted name of an object to import and call to generate a test suite. - - 'package_data' -- a dictionary mapping package names to lists of filenames - or globs to use to find data files contained in the named packages. 
- If the dictionary has filenames or globs listed under '""' (the empty - string), those names will be searched for in every package, in addition - to any names for the specific package. Data files found using these - names/globs will be installed along with the package, in the same - location as the package. Note that globs are allowed to reference - the contents of non-package subdirectories, as long as you use '/' as - a path separator. (Globs are automatically converted to - platform-specific paths at runtime.) - - In addition to these new keywords, this class also has several new methods - for manipulating the distribution's contents. For example, the 'include()' - and 'exclude()' methods can be thought of as in-place add and subtract - commands that add or remove packages, modules, extensions, and so on from - the distribution. They are used by the feature subsystem to configure the - distribution for the included and excluded features. - """ - - _patched_dist = None - - def patch_missing_pkg_info(self, attrs): - # Fake up a replacement for the data that would normally come from - # PKG-INFO, but which might not yet be built if this is a fresh - # checkout. - # - if not attrs or 'name' not in attrs or 'version' not in attrs: - return - key = pkg_resources.safe_name(str(attrs['name'])).lower() - dist = pkg_resources.working_set.by_key.get(key) - if dist is not None and not dist.has_metadata('PKG-INFO'): - dist._version = pkg_resources.safe_version(str(attrs['version'])) - self._patched_dist = dist - - def __init__(self, attrs=None): - have_package_data = hasattr(self, "package_data") - if not have_package_data: - self.package_data = {} - attrs = attrs or {} - if 'features' in attrs or 'require_features' in attrs: - Feature.warn_deprecated() - self.require_features = [] - self.features = {} - self.dist_files = [] - self.src_root = attrs.pop("src_root", None) - self.patch_missing_pkg_info(attrs) - self.project_urls = attrs.get('project_urls', {}) - self.dependency_links = attrs.pop('dependency_links', []) - self.setup_requires = attrs.pop('setup_requires', []) - for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'): - vars(self).setdefault(ep.name, None) - _Distribution.__init__(self, attrs) - - # The project_urls attribute may not be supported in distutils, so - # prime it here from our value if not automatically set - self.metadata.project_urls = getattr( - self.metadata, 'project_urls', self.project_urls) - self.metadata.long_description_content_type = attrs.get( - 'long_description_content_type' - ) - self.metadata.provides_extras = getattr( - self.metadata, 'provides_extras', set() - ) - - if isinstance(self.metadata.version, numbers.Number): - # Some people apparently take "version number" too literally :) - self.metadata.version = str(self.metadata.version) - - if self.metadata.version is not None: - try: - ver = packaging.version.Version(self.metadata.version) - normalized_version = str(ver) - if self.metadata.version != normalized_version: - warnings.warn( - "Normalizing '%s' to '%s'" % ( - self.metadata.version, - normalized_version, - ) - ) - self.metadata.version = normalized_version - except (packaging.version.InvalidVersion, TypeError): - warnings.warn( - "The version specified (%r) is an invalid version, this " - "may not work as expected with newer versions of " - "setuptools, pip, and PyPI. Please see PEP 440 for more " - "details." 
% self.metadata.version - ) - self._finalize_requires() - - def _finalize_requires(self): - """ - Set `metadata.python_requires` and fix environment markers - in `install_requires` and `extras_require`. - """ - if getattr(self, 'python_requires', None): - self.metadata.python_requires = self.python_requires - - if getattr(self, 'extras_require', None): - for extra in self.extras_require.keys(): - # Since this gets called multiple times at points where the - # keys have become 'converted' extras, ensure that we are only - # truly adding extras we haven't seen before here. - extra = extra.split(':')[0] - if extra: - self.metadata.provides_extras.add(extra) - - self._convert_extras_requirements() - self._move_install_requirements_markers() - - def _convert_extras_requirements(self): - """ - Convert requirements in `extras_require` of the form - `"extra": ["barbazquux; {marker}"]` to - `"extra:{marker}": ["barbazquux"]`. - """ - spec_ext_reqs = getattr(self, 'extras_require', None) or {} - self._tmp_extras_require = defaultdict(list) - for section, v in spec_ext_reqs.items(): - # Do not strip empty sections. - self._tmp_extras_require[section] - for r in pkg_resources.parse_requirements(v): - suffix = self._suffix_for(r) - self._tmp_extras_require[section + suffix].append(r) - - @staticmethod - def _suffix_for(req): - """ - For a requirement, return the 'extras_require' suffix for - that requirement. - """ - return ':' + str(req.marker) if req.marker else '' - - def _move_install_requirements_markers(self): - """ - Move requirements in `install_requires` that are using environment - markers `extras_require`. - """ - - # divide the install_requires into two sets, simple ones still - # handled by install_requires and more complex ones handled - # by extras_require. - - def is_simple_req(req): - return not req.marker - - spec_inst_reqs = getattr(self, 'install_requires', None) or () - inst_reqs = list(pkg_resources.parse_requirements(spec_inst_reqs)) - simple_reqs = filter(is_simple_req, inst_reqs) - complex_reqs = filterfalse(is_simple_req, inst_reqs) - self.install_requires = list(map(str, simple_reqs)) - - for r in complex_reqs: - self._tmp_extras_require[':' + str(r.marker)].append(r) - self.extras_require = dict( - (k, [str(r) for r in map(self._clean_req, v)]) - for k, v in self._tmp_extras_require.items() - ) - - def _clean_req(self, req): - """ - Given a Requirement, remove environment markers and return it. - """ - req.marker = None - return req - - def parse_config_files(self, filenames=None, ignore_option_errors=False): - """Parses configuration files from various levels - and loads configuration. 
- - """ - _Distribution.parse_config_files(self, filenames=filenames) - - parse_configuration(self, self.command_options, - ignore_option_errors=ignore_option_errors) - self._finalize_requires() - - def parse_command_line(self): - """Process features after parsing command line options""" - result = _Distribution.parse_command_line(self) - if self.features: - self._finalize_features() - return result - - def _feature_attrname(self, name): - """Convert feature name to corresponding option attribute name""" - return 'with_' + name.replace('-', '_') - - def fetch_build_eggs(self, requires): - """Resolve pre-setup requirements""" - resolved_dists = pkg_resources.working_set.resolve( - pkg_resources.parse_requirements(requires), - installer=self.fetch_build_egg, - replace_conflicting=True, - ) - for dist in resolved_dists: - pkg_resources.working_set.add(dist, replace=True) - return resolved_dists - - def finalize_options(self): - _Distribution.finalize_options(self) - if self.features: - self._set_global_opts_from_features() - - for ep in pkg_resources.iter_entry_points('distutils.setup_keywords'): - value = getattr(self, ep.name, None) - if value is not None: - ep.require(installer=self.fetch_build_egg) - ep.load()(self, ep.name, value) - if getattr(self, 'convert_2to3_doctests', None): - # XXX may convert to set here when we can rely on set being builtin - self.convert_2to3_doctests = [ - os.path.abspath(p) - for p in self.convert_2to3_doctests - ] - else: - self.convert_2to3_doctests = [] - - def get_egg_cache_dir(self): - egg_cache_dir = os.path.join(os.curdir, '.eggs') - if not os.path.exists(egg_cache_dir): - os.mkdir(egg_cache_dir) - windows_support.hide_file(egg_cache_dir) - readme_txt_filename = os.path.join(egg_cache_dir, 'README.txt') - with open(readme_txt_filename, 'w') as f: - f.write('This directory contains eggs that were downloaded ' - 'by setuptools to build, test, and run plug-ins.\n\n') - f.write('This directory caches those eggs to prevent ' - 'repeated downloads.\n\n') - f.write('However, it is safe to delete this directory.\n\n') - - return egg_cache_dir - - def fetch_build_egg(self, req): - """Fetch an egg needed for building""" - from setuptools.command.easy_install import easy_install - dist = self.__class__({'script_args': ['easy_install']}) - opts = dist.get_option_dict('easy_install') - opts.clear() - opts.update( - (k, v) - for k, v in self.get_option_dict('easy_install').items() - if k in ( - # don't use any other settings - 'find_links', 'site_dirs', 'index_url', - 'optimize', 'site_dirs', 'allow_hosts', - )) - if self.dependency_links: - links = self.dependency_links[:] - if 'find_links' in opts: - links = opts['find_links'][1] + links - opts['find_links'] = ('setup', links) - install_dir = self.get_egg_cache_dir() - cmd = easy_install( - dist, args=["x"], install_dir=install_dir, - exclude_scripts=True, - always_copy=False, build_directory=None, editable=False, - upgrade=False, multi_version=True, no_report=True, user=False - ) - cmd.ensure_finalized() - return cmd.easy_install(req) - - def _set_global_opts_from_features(self): - """Add --with-X/--without-X options based on optional features""" - - go = [] - no = self.negative_opt.copy() - - for name, feature in self.features.items(): - self._set_feature(name, None) - feature.validate(self) - - if feature.optional: - descr = feature.description - incdef = ' (default)' - excdef = '' - if not feature.include_by_default(): - excdef, incdef = incdef, excdef - - new = ( - ('with-' + name, None, 'include ' + descr + 
incdef), - ('without-' + name, None, 'exclude ' + descr + excdef), - ) - go.extend(new) - no['without-' + name] = 'with-' + name - - self.global_options = self.feature_options = go + self.global_options - self.negative_opt = self.feature_negopt = no - - def _finalize_features(self): - """Add/remove features and resolve dependencies between them""" - - # First, flag all the enabled items (and thus their dependencies) - for name, feature in self.features.items(): - enabled = self.feature_is_included(name) - if enabled or (enabled is None and feature.include_by_default()): - feature.include_in(self) - self._set_feature(name, 1) - - # Then disable the rest, so that off-by-default features don't - # get flagged as errors when they're required by an enabled feature - for name, feature in self.features.items(): - if not self.feature_is_included(name): - feature.exclude_from(self) - self._set_feature(name, 0) - - def get_command_class(self, command): - """Pluggable version of get_command_class()""" - if command in self.cmdclass: - return self.cmdclass[command] - - eps = pkg_resources.iter_entry_points('distutils.commands', command) - for ep in eps: - ep.require(installer=self.fetch_build_egg) - self.cmdclass[command] = cmdclass = ep.load() - return cmdclass - else: - return _Distribution.get_command_class(self, command) - - def print_commands(self): - for ep in pkg_resources.iter_entry_points('distutils.commands'): - if ep.name not in self.cmdclass: - # don't require extras as the commands won't be invoked - cmdclass = ep.resolve() - self.cmdclass[ep.name] = cmdclass - return _Distribution.print_commands(self) - - def get_command_list(self): - for ep in pkg_resources.iter_entry_points('distutils.commands'): - if ep.name not in self.cmdclass: - # don't require extras as the commands won't be invoked - cmdclass = ep.resolve() - self.cmdclass[ep.name] = cmdclass - return _Distribution.get_command_list(self) - - def _set_feature(self, name, status): - """Set feature's inclusion status""" - setattr(self, self._feature_attrname(name), status) - - def feature_is_included(self, name): - """Return 1 if feature is included, 0 if excluded, 'None' if unknown""" - return getattr(self, self._feature_attrname(name)) - - def include_feature(self, name): - """Request inclusion of feature named 'name'""" - - if self.feature_is_included(name) == 0: - descr = self.features[name].description - raise DistutilsOptionError( - descr + " is required, but was excluded or is not available" - ) - self.features[name].include_in(self) - self._set_feature(name, 1) - - def include(self, **attrs): - """Add items to distribution that are named in keyword arguments - - For example, 'dist.exclude(py_modules=["x"])' would add 'x' to - the distribution's 'py_modules' attribute, if it was not already - there. - - Currently, this method only supports inclusion for attributes that are - lists or tuples. If you need to add support for adding to other - attributes in this or a subclass, you can add an '_include_X' method, - where 'X' is the name of the attribute. The method will be called with - the value passed to 'include()'. So, 'dist.include(foo={"bar":"baz"})' - will try to call 'dist._include_foo({"bar":"baz"})', which can then - handle whatever special inclusion logic is needed. 
- """ - for k, v in attrs.items(): - include = getattr(self, '_include_' + k, None) - if include: - include(v) - else: - self._include_misc(k, v) - - def exclude_package(self, package): - """Remove packages, modules, and extensions in named package""" - - pfx = package + '.' - if self.packages: - self.packages = [ - p for p in self.packages - if p != package and not p.startswith(pfx) - ] - - if self.py_modules: - self.py_modules = [ - p for p in self.py_modules - if p != package and not p.startswith(pfx) - ] - - if self.ext_modules: - self.ext_modules = [ - p for p in self.ext_modules - if p.name != package and not p.name.startswith(pfx) - ] - - def has_contents_for(self, package): - """Return true if 'exclude_package(package)' would do something""" - - pfx = package + '.' - - for p in self.iter_distribution_names(): - if p == package or p.startswith(pfx): - return True - - def _exclude_misc(self, name, value): - """Handle 'exclude()' for list/tuple attrs without a special handler""" - if not isinstance(value, sequence): - raise DistutilsSetupError( - "%s: setting must be a list or tuple (%r)" % (name, value) - ) - try: - old = getattr(self, name) - except AttributeError: - raise DistutilsSetupError( - "%s: No such distribution setting" % name - ) - if old is not None and not isinstance(old, sequence): - raise DistutilsSetupError( - name + ": this setting cannot be changed via include/exclude" - ) - elif old: - setattr(self, name, [item for item in old if item not in value]) - - def _include_misc(self, name, value): - """Handle 'include()' for list/tuple attrs without a special handler""" - - if not isinstance(value, sequence): - raise DistutilsSetupError( - "%s: setting must be a list (%r)" % (name, value) - ) - try: - old = getattr(self, name) - except AttributeError: - raise DistutilsSetupError( - "%s: No such distribution setting" % name - ) - if old is None: - setattr(self, name, value) - elif not isinstance(old, sequence): - raise DistutilsSetupError( - name + ": this setting cannot be changed via include/exclude" - ) - else: - new = [item for item in value if item not in old] - setattr(self, name, old + new) - - def exclude(self, **attrs): - """Remove items from distribution that are named in keyword arguments - - For example, 'dist.exclude(py_modules=["x"])' would remove 'x' from - the distribution's 'py_modules' attribute. Excluding packages uses - the 'exclude_package()' method, so all of the package's contained - packages, modules, and extensions are also excluded. - - Currently, this method only supports exclusion from attributes that are - lists or tuples. If you need to add support for excluding from other - attributes in this or a subclass, you can add an '_exclude_X' method, - where 'X' is the name of the attribute. The method will be called with - the value passed to 'exclude()'. So, 'dist.exclude(foo={"bar":"baz"})' - will try to call 'dist._exclude_foo({"bar":"baz"})', which can then - handle whatever special exclusion logic is needed. 
- """ - for k, v in attrs.items(): - exclude = getattr(self, '_exclude_' + k, None) - if exclude: - exclude(v) - else: - self._exclude_misc(k, v) - - def _exclude_packages(self, packages): - if not isinstance(packages, sequence): - raise DistutilsSetupError( - "packages: setting must be a list or tuple (%r)" % (packages,) - ) - list(map(self.exclude_package, packages)) - - def _parse_command_opts(self, parser, args): - # Remove --with-X/--without-X options when processing command args - self.global_options = self.__class__.global_options - self.negative_opt = self.__class__.negative_opt - - # First, expand any aliases - command = args[0] - aliases = self.get_option_dict('aliases') - while command in aliases: - src, alias = aliases[command] - del aliases[command] # ensure each alias can expand only once! - import shlex - args[:1] = shlex.split(alias, True) - command = args[0] - - nargs = _Distribution._parse_command_opts(self, parser, args) - - # Handle commands that want to consume all remaining arguments - cmd_class = self.get_command_class(command) - if getattr(cmd_class, 'command_consumes_arguments', None): - self.get_option_dict(command)['args'] = ("command line", nargs) - if nargs is not None: - return [] - - return nargs - - def get_cmdline_options(self): - """Return a '{cmd: {opt:val}}' map of all command-line options - - Option names are all long, but do not include the leading '--', and - contain dashes rather than underscores. If the option doesn't take - an argument (e.g. '--quiet'), the 'val' is 'None'. - - Note that options provided by config files are intentionally excluded. - """ - - d = {} - - for cmd, opts in self.command_options.items(): - - for opt, (src, val) in opts.items(): - - if src != "command line": - continue - - opt = opt.replace('_', '-') - - if val == 0: - cmdobj = self.get_command_obj(cmd) - neg_opt = self.negative_opt.copy() - neg_opt.update(getattr(cmdobj, 'negative_opt', {})) - for neg, pos in neg_opt.items(): - if pos == opt: - opt = neg - val = None - break - else: - raise AssertionError("Shouldn't be able to get here") - - elif val == 1: - val = None - - d.setdefault(cmd, {})[opt] = val - - return d - - def iter_distribution_names(self): - """Yield all packages, modules, and extension names in distribution""" - - for pkg in self.packages or (): - yield pkg - - for module in self.py_modules or (): - yield module - - for ext in self.ext_modules or (): - if isinstance(ext, tuple): - name, buildinfo = ext - else: - name = ext.name - if name.endswith('module'): - name = name[:-6] - yield name - - def handle_display_options(self, option_order): - """If there were any non-global "display-only" options - (--help-commands or the metadata display options) on the command - line, display the requested info and return true; else return - false. - """ - import sys - - if six.PY2 or self.help_commands: - return _Distribution.handle_display_options(self, option_order) - - # Stdout may be StringIO (e.g. in tests) - import io - if not isinstance(sys.stdout, io.TextIOWrapper): - return _Distribution.handle_display_options(self, option_order) - - # Don't wrap stdout if utf-8 is already the encoding. Provides - # workaround for #334. 
- if sys.stdout.encoding.lower() in ('utf-8', 'utf8'): - return _Distribution.handle_display_options(self, option_order) - - # Print metadata in UTF-8 no matter the platform - encoding = sys.stdout.encoding - errors = sys.stdout.errors - newline = sys.platform != 'win32' and '\n' or None - line_buffering = sys.stdout.line_buffering - - sys.stdout = io.TextIOWrapper( - sys.stdout.detach(), 'utf-8', errors, newline, line_buffering) - try: - return _Distribution.handle_display_options(self, option_order) - finally: - sys.stdout = io.TextIOWrapper( - sys.stdout.detach(), encoding, errors, newline, line_buffering) - - -class Feature: - """ - **deprecated** -- The `Feature` facility was never completely implemented - or supported, `has reported issues - <https://github.com/pypa/setuptools/issues/58>`_ and will be removed in - a future version. - - A subset of the distribution that can be excluded if unneeded/wanted - - Features are created using these keyword arguments: - - 'description' -- a short, human readable description of the feature, to - be used in error messages, and option help messages. - - 'standard' -- if true, the feature is included by default if it is - available on the current system. Otherwise, the feature is only - included if requested via a command line '--with-X' option, or if - another included feature requires it. The default setting is 'False'. - - 'available' -- if true, the feature is available for installation on the - current system. The default setting is 'True'. - - 'optional' -- if true, the feature's inclusion can be controlled from the - command line, using the '--with-X' or '--without-X' options. If - false, the feature's inclusion status is determined automatically, - based on 'availabile', 'standard', and whether any other feature - requires it. The default setting is 'True'. - - 'require_features' -- a string or sequence of strings naming features - that should also be included if this feature is included. Defaults to - empty list. May also contain 'Require' objects that should be - added/removed from the distribution. - - 'remove' -- a string or list of strings naming packages to be removed - from the distribution if this feature is *not* included. If the - feature *is* included, this argument is ignored. This argument exists - to support removing features that "crosscut" a distribution, such as - defining a 'tests' feature that removes all the 'tests' subpackages - provided by other features. The default for this argument is an empty - list. (Note: the named package(s) or modules must exist in the base - distribution when the 'setup()' function is initially called.) - - other keywords -- any other keyword arguments are saved, and passed to - the distribution's 'include()' and 'exclude()' methods when the - feature is included or excluded, respectively. So, for example, you - could pass 'packages=["a","b"]' to cause packages 'a' and 'b' to be - added or removed from the distribution as appropriate. - - A feature must include at least one 'requires', 'remove', or other - keyword argument. Otherwise, it can't affect the distribution in any way. - Note also that you can subclass 'Feature' to create your own specialized - feature types that modify the distribution in other ways when included or - excluded. See the docstrings for the various methods here for more detail. - Aside from the methods, the only feature attributes that distributions look - at are 'description' and 'optional'. 
- """ - - @staticmethod - def warn_deprecated(): - msg = ( - "Features are deprecated and will be removed in a future " - "version. See https://github.com/pypa/setuptools/issues/65." - ) - warnings.warn(msg, DeprecationWarning, stacklevel=3) - - def __init__( - self, description, standard=False, available=True, - optional=True, require_features=(), remove=(), **extras): - self.warn_deprecated() - - self.description = description - self.standard = standard - self.available = available - self.optional = optional - if isinstance(require_features, (str, Require)): - require_features = require_features, - - self.require_features = [ - r for r in require_features if isinstance(r, str) - ] - er = [r for r in require_features if not isinstance(r, str)] - if er: - extras['require_features'] = er - - if isinstance(remove, str): - remove = remove, - self.remove = remove - self.extras = extras - - if not remove and not require_features and not extras: - raise DistutilsSetupError( - "Feature %s: must define 'require_features', 'remove', or " - "at least one of 'packages', 'py_modules', etc." - ) - - def include_by_default(self): - """Should this feature be included by default?""" - return self.available and self.standard - - def include_in(self, dist): - """Ensure feature and its requirements are included in distribution - - You may override this in a subclass to perform additional operations on - the distribution. Note that this method may be called more than once - per feature, and so should be idempotent. - - """ - - if not self.available: - raise DistutilsPlatformError( - self.description + " is required, " - "but is not available on this platform" - ) - - dist.include(**self.extras) - - for f in self.require_features: - dist.include_feature(f) - - def exclude_from(self, dist): - """Ensure feature is excluded from distribution - - You may override this in a subclass to perform additional operations on - the distribution. This method will be called at most once per - feature, and only after all included features have been asked to - include themselves. - """ - - dist.exclude(**self.extras) - - if self.remove: - for item in self.remove: - dist.exclude_package(item) - - def validate(self, dist): - """Verify that feature makes sense in context of distribution - - This method is called by the distribution just before it parses its - command line. It checks to ensure that the 'remove' attribute, if any, - contains only valid package/module names that are present in the base - distribution when 'setup()' is called. You may override it in a - subclass to perform any other required validation of the feature - against a target distribution. - """ - - for item in self.remove: - if not dist.has_contents_for(item): - raise DistutilsSetupError( - "%s wants to be able to remove %s, but the distribution" - " doesn't contain any packages or modules under %s" - % (self.description, item, item) - ) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/extension.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/extension.py deleted file mode 100644 index 2946889..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/extension.py +++ /dev/null @@ -1,57 +0,0 @@ -import re -import functools -import distutils.core -import distutils.errors -import distutils.extension - -from setuptools.extern.six.moves import map - -from .monkey import get_unpatched - - -def _have_cython(): - """ - Return True if Cython can be imported. 
- """ - cython_impl = 'Cython.Distutils.build_ext' - try: - # from (cython_impl) import build_ext - __import__(cython_impl, fromlist=['build_ext']).build_ext - return True - except Exception: - pass - return False - - -# for compatibility -have_pyrex = _have_cython - -_Extension = get_unpatched(distutils.core.Extension) - - -class Extension(_Extension): - """Extension that uses '.c' files in place of '.pyx' files""" - - def __init__(self, name, sources, *args, **kw): - # The *args is needed for compatibility as calls may use positional - # arguments. py_limited_api may be set only via keyword. - self.py_limited_api = kw.pop("py_limited_api", False) - _Extension.__init__(self, name, sources, *args, **kw) - - def _convert_pyx_sources_to_lang(self): - """ - Replace sources with .pyx extensions to sources with the target - language extension. This mechanism allows language authors to supply - pre-converted sources but to prefer the .pyx sources. - """ - if _have_cython(): - # the build has Cython, so allow it to compile the .pyx files - return - lang = self.language or '' - target_ext = '.cpp' if lang.lower() == 'c++' else '.c' - sub = functools.partial(re.sub, '.pyx$', target_ext) - self.sources = list(map(sub, self.sources)) - - -class Library(Extension): - """Just like a regular Extension, but built as a library instead""" diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/extern/__init__.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/extern/__init__.py deleted file mode 100644 index da3d668..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/extern/__init__.py +++ /dev/null @@ -1,73 +0,0 @@ -import sys - - -class VendorImporter: - """ - A PEP 302 meta path importer for finding optionally-vendored - or otherwise naturally-installed packages from root_name. - """ - - def __init__(self, root_name, vendored_names=(), vendor_pkg=None): - self.root_name = root_name - self.vendored_names = set(vendored_names) - self.vendor_pkg = vendor_pkg or root_name.replace('extern', '_vendor') - - @property - def search_path(self): - """ - Search first the vendor package then as a natural package. - """ - yield self.vendor_pkg + '.' - yield '' - - def find_module(self, fullname, path=None): - """ - Return self when fullname starts with root_name and the - target module is one vendored through this importer. - """ - root, base, target = fullname.partition(self.root_name + '.') - if root: - return - if not any(map(target.startswith, self.vendored_names)): - return - return self - - def load_module(self, fullname): - """ - Iterate over the search path to locate and load fullname. - """ - root, base, target = fullname.partition(self.root_name + '.') - for prefix in self.search_path: - try: - extant = prefix + target - __import__(extant) - mod = sys.modules[extant] - sys.modules[fullname] = mod - # mysterious hack: - # Remove the reference to the extant package/module - # on later Python versions to cause relative imports - # in the vendor package to resolve the same modules - # as those going through this importer. - if sys.version_info > (3, 3): - del sys.modules[extant] - return mod - except ImportError: - pass - else: - raise ImportError( - "The '{target}' package is required; " - "normally this is bundled with this package so if you get " - "this warning, consult the packager of your " - "distribution.".format(**locals()) - ) - - def install(self): - """ - Install this importer into sys.meta_path if not already present. 
- """ - if self not in sys.meta_path: - sys.meta_path.append(self) - - -names = 'six', 'packaging', 'pyparsing', -VendorImporter(__name__, names, 'setuptools._vendor').install() diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/extern/__pycache__/__init__.cpython-36.pyc b/classifier/myenv/lib/python3.6/site-packages/setuptools/extern/__pycache__/__init__.cpython-36.pyc deleted file mode 100644 index c3923cc..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/extern/__pycache__/__init__.cpython-36.pyc and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/glibc.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/glibc.py deleted file mode 100644 index a134591..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/glibc.py +++ /dev/null @@ -1,86 +0,0 @@ -# This file originally from pip: -# https://github.com/pypa/pip/blob/8f4f15a5a95d7d5b511ceaee9ed261176c181970/src/pip/_internal/utils/glibc.py -from __future__ import absolute_import - -import ctypes -import re -import warnings - - -def glibc_version_string(): - "Returns glibc version string, or None if not using glibc." - - # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen - # manpage says, "If filename is NULL, then the returned handle is for the - # main program". This way we can let the linker do the work to figure out - # which libc our process is actually using. - process_namespace = ctypes.CDLL(None) - try: - gnu_get_libc_version = process_namespace.gnu_get_libc_version - except AttributeError: - # Symbol doesn't exist -> therefore, we are not linked to - # glibc. - return None - - # Call gnu_get_libc_version, which returns a string like "2.5" - gnu_get_libc_version.restype = ctypes.c_char_p - version_str = gnu_get_libc_version() - # py2 / py3 compatibility: - if not isinstance(version_str, str): - version_str = version_str.decode("ascii") - - return version_str - - -# Separated out from have_compatible_glibc for easier unit testing -def check_glibc_version(version_str, required_major, minimum_minor): - # Parse string and check against requested version. - # - # We use a regexp instead of str.split because we want to discard any - # random junk that might come after the minor version -- this might happen - # in patched/forked versions of glibc (e.g. Linaro's version of glibc - # uses version strings like "2.20-2014.11"). See gh-3588. - m = re.match(r"(?P<major>[0-9]+)\.(?P<minor>[0-9]+)", version_str) - if not m: - warnings.warn("Expected glibc version with 2 components major.minor," - " got: %s" % version_str, RuntimeWarning) - return False - return (int(m.group("major")) == required_major and - int(m.group("minor")) >= minimum_minor) - - -def have_compatible_glibc(required_major, minimum_minor): - version_str = glibc_version_string() - if version_str is None: - return False - return check_glibc_version(version_str, required_major, minimum_minor) - - -# platform.libc_ver regularly returns completely nonsensical glibc -# versions. E.g. on my computer, platform says: -# -# ~$ python2.7 -c 'import platform; print(platform.libc_ver())' -# ('glibc', '2.7') -# ~$ python3.5 -c 'import platform; print(platform.libc_ver())' -# ('glibc', '2.9') -# -# But the truth is: -# -# ~$ ldd --version -# ldd (Debian GLIBC 2.22-11) 2.22 -# -# This is unfortunate, because it means that the linehaul data on libc -# versions that was generated by pip 8.1.2 and earlier is useless and -# misleading. 
Solution: instead of using platform, use our code that actually -# works. -def libc_ver(): - """Try to determine the glibc version - - Returns a tuple of strings (lib, version) which default to empty strings - in case the lookup fails. - """ - glibc_version = glibc_version_string() - if glibc_version is None: - return ("", "") - else: - return ("glibc", glibc_version) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/glob.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/glob.py deleted file mode 100644 index 6c781de..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/glob.py +++ /dev/null @@ -1,176 +0,0 @@ -""" -Filename globbing utility. Mostly a copy of `glob` from Python 3.5. - -Changes include: - * `yield from` and PEP3102 `*` removed. - * `bytes` changed to `six.binary_type`. - * Hidden files are not ignored. -""" - -import os -import re -import fnmatch -from setuptools.extern.six import binary_type - -__all__ = ["glob", "iglob", "escape"] - - -def glob(pathname, recursive=False): - """Return a list of paths matching a pathname pattern. - - The pattern may contain simple shell-style wildcards a la - fnmatch. However, unlike fnmatch, filenames starting with a - dot are special cases that are not matched by '*' and '?' - patterns. - - If recursive is true, the pattern '**' will match any files and - zero or more directories and subdirectories. - """ - return list(iglob(pathname, recursive=recursive)) - - -def iglob(pathname, recursive=False): - """Return an iterator which yields the paths matching a pathname pattern. - - The pattern may contain simple shell-style wildcards a la - fnmatch. However, unlike fnmatch, filenames starting with a - dot are special cases that are not matched by '*' and '?' - patterns. - - If recursive is true, the pattern '**' will match any files and - zero or more directories and subdirectories. - """ - it = _iglob(pathname, recursive) - if recursive and _isrecursive(pathname): - s = next(it) # skip empty string - assert not s - return it - - -def _iglob(pathname, recursive): - dirname, basename = os.path.split(pathname) - if not has_magic(pathname): - if basename: - if os.path.lexists(pathname): - yield pathname - else: - # Patterns ending with a slash should match only directories - if os.path.isdir(dirname): - yield pathname - return - if not dirname: - if recursive and _isrecursive(basename): - for x in glob2(dirname, basename): - yield x - else: - for x in glob1(dirname, basename): - yield x - return - # `os.path.split()` returns the argument itself as a dirname if it is a - # drive or UNC path. Prevent an infinite recursion if a drive or UNC path - # contains magic characters (i.e. r'\\?\C:'). - if dirname != pathname and has_magic(dirname): - dirs = _iglob(dirname, recursive) - else: - dirs = [dirname] - if has_magic(basename): - if recursive and _isrecursive(basename): - glob_in_dir = glob2 - else: - glob_in_dir = glob1 - else: - glob_in_dir = glob0 - for dirname in dirs: - for name in glob_in_dir(dirname, basename): - yield os.path.join(dirname, name) - - -# These 2 helper functions non-recursively glob inside a literal directory. -# They return a list of basenames. `glob1` accepts a pattern while `glob0` -# takes a literal basename (so it only has to check for its existence). 
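# A minimal, self-contained sketch of the two-helper technique the comment
# above describes (shell-pattern matching vs. a bare existence check). The
# names `match_in_dir` and `literal_in_dir` are illustrative only, not
# setuptools APIs:
import os
import fnmatch

def match_in_dir(dirname, pattern):
    # glob1-style: list the literal directory, keep names matching the
    # pattern; an unreadable or missing directory yields no matches.
    try:
        names = os.listdir(dirname or os.curdir)
    except OSError:
        return []
    return fnmatch.filter(names, pattern)

def literal_in_dir(dirname, basename):
    # glob0-style: a literal basename only needs an existence check;
    # lexists() also counts broken symlinks as present.
    if os.path.lexists(os.path.join(dirname, basename)):
        return [basename]
    return []

# e.g. match_in_dir('.', '*.py') might return ['setup.py'], while
# literal_in_dir('.', 'setup.py') returns ['setup.py'] only if it exists.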
- - -def glob1(dirname, pattern): - if not dirname: - if isinstance(pattern, binary_type): - dirname = os.curdir.encode('ASCII') - else: - dirname = os.curdir - try: - names = os.listdir(dirname) - except OSError: - return [] - return fnmatch.filter(names, pattern) - - -def glob0(dirname, basename): - if not basename: - # `os.path.split()` returns an empty basename for paths ending with a - # directory separator. 'q*x/' should match only directories. - if os.path.isdir(dirname): - return [basename] - else: - if os.path.lexists(os.path.join(dirname, basename)): - return [basename] - return [] - - -# This helper function recursively yields relative pathnames inside a literal -# directory. - - -def glob2(dirname, pattern): - assert _isrecursive(pattern) - yield pattern[:0] - for x in _rlistdir(dirname): - yield x - - -# Recursively yields relative pathnames inside a literal directory. -def _rlistdir(dirname): - if not dirname: - if isinstance(dirname, binary_type): - dirname = binary_type(os.curdir, 'ASCII') - else: - dirname = os.curdir - try: - names = os.listdir(dirname) - except os.error: - return - for x in names: - yield x - path = os.path.join(dirname, x) if dirname else x - for y in _rlistdir(path): - yield os.path.join(x, y) - - -magic_check = re.compile('([*?[])') -magic_check_bytes = re.compile(b'([*?[])') - - -def has_magic(s): - if isinstance(s, binary_type): - match = magic_check_bytes.search(s) - else: - match = magic_check.search(s) - return match is not None - - -def _isrecursive(pattern): - if isinstance(pattern, binary_type): - return pattern == b'**' - else: - return pattern == '**' - - -def escape(pathname): - """Escape all special characters. - """ - # Escaping is done by wrapping any of "*?[" between square brackets. - # Metacharacters do not work in the drive part and shouldn't be escaped. - drive, pathname = os.path.splitdrive(pathname) - if isinstance(pathname, binary_type): - pathname = magic_check_bytes.sub(br'[\1]', pathname) - else: - pathname = magic_check.sub(r'[\1]', pathname) - return drive + pathname diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/gui-32.exe b/classifier/myenv/lib/python3.6/site-packages/setuptools/gui-32.exe deleted file mode 100644 index f8d3509..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/gui-32.exe and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/gui-64.exe b/classifier/myenv/lib/python3.6/site-packages/setuptools/gui-64.exe deleted file mode 100644 index 330c51a..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/gui-64.exe and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/gui.exe b/classifier/myenv/lib/python3.6/site-packages/setuptools/gui.exe deleted file mode 100644 index f8d3509..0000000 Binary files a/classifier/myenv/lib/python3.6/site-packages/setuptools/gui.exe and /dev/null differ diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/launch.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/launch.py deleted file mode 100644 index 308283e..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/launch.py +++ /dev/null @@ -1,35 +0,0 @@ -""" -Launch the Python script on the command line after -setuptools is bootstrapped via import. 
-""" - -# Note that setuptools gets imported implicitly by the -# invocation of this script using python -m setuptools.launch - -import tokenize -import sys - - -def run(): - """ - Run the script in sys.argv[1] as if it had - been invoked naturally. - """ - __builtins__ - script_name = sys.argv[1] - namespace = dict( - __file__=script_name, - __name__='__main__', - __doc__=None, - ) - sys.argv[:] = sys.argv[1:] - - open_ = getattr(tokenize, 'open', open) - script = open_(script_name).read() - norm_script = script.replace('\\r\\n', '\\n') - code = compile(norm_script, script_name, 'exec') - exec(code, namespace) - - -if __name__ == '__main__': - run() diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/lib2to3_ex.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/lib2to3_ex.py deleted file mode 100644 index 4b1a73f..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/lib2to3_ex.py +++ /dev/null @@ -1,62 +0,0 @@ -""" -Customized Mixin2to3 support: - - - adds support for converting doctests - - -This module raises an ImportError on Python 2. -""" - -from distutils.util import Mixin2to3 as _Mixin2to3 -from distutils import log -from lib2to3.refactor import RefactoringTool, get_fixers_from_package - -import setuptools - - -class DistutilsRefactoringTool(RefactoringTool): - def log_error(self, msg, *args, **kw): - log.error(msg, *args) - - def log_message(self, msg, *args): - log.info(msg, *args) - - def log_debug(self, msg, *args): - log.debug(msg, *args) - - -class Mixin2to3(_Mixin2to3): - def run_2to3(self, files, doctests=False): - # See of the distribution option has been set, otherwise check the - # setuptools default. - if self.distribution.use_2to3 is not True: - return - if not files: - return - log.info("Fixing " + " ".join(files)) - self.__build_fixer_names() - self.__exclude_fixers() - if doctests: - if setuptools.run_2to3_on_doctests: - r = DistutilsRefactoringTool(self.fixer_names) - r.refactor(files, write=True, doctests_only=True) - else: - _Mixin2to3.run_2to3(self, files) - - def __build_fixer_names(self): - if self.fixer_names: - return - self.fixer_names = [] - for p in setuptools.lib2to3_fixer_packages: - self.fixer_names.extend(get_fixers_from_package(p)) - if self.distribution.use_2to3_fixers is not None: - for p in self.distribution.use_2to3_fixers: - self.fixer_names.extend(get_fixers_from_package(p)) - - def __exclude_fixers(self): - excluded_fixers = getattr(self, 'exclude_fixers', []) - if self.distribution.use_2to3_exclude_fixers is not None: - excluded_fixers.extend(self.distribution.use_2to3_exclude_fixers) - for fixer_name in excluded_fixers: - if fixer_name in self.fixer_names: - self.fixer_names.remove(fixer_name) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/monkey.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/monkey.py deleted file mode 100644 index d9eb7d7..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/monkey.py +++ /dev/null @@ -1,197 +0,0 @@ -""" -Monkey patching of distutils. -""" - -import sys -import distutils.filelist -import platform -import types -import functools -from importlib import import_module -import inspect - -from setuptools.extern import six - -import setuptools - -__all__ = [] -""" -Everything is private. Contact the project team -if you think you need this functionality. -""" - - -def _get_mro(cls): - """ - Returns the bases classes for cls sorted by the MRO. 
- - Works around an issue on Jython where inspect.getmro will not return all - base classes if multiple classes share the same name. Instead, this - function will return a tuple containing the class itself, and the contents - of cls.__bases__. See https://github.com/pypa/setuptools/issues/1024. - """ - if platform.python_implementation() == "Jython": - return (cls,) + cls.__bases__ - return inspect.getmro(cls) - - -def get_unpatched(item): - lookup = ( - get_unpatched_class if isinstance(item, six.class_types) else - get_unpatched_function if isinstance(item, types.FunctionType) else - lambda item: None - ) - return lookup(item) - - -def get_unpatched_class(cls): - """Protect against re-patching the distutils if reloaded - - Also ensures that no other distutils extension monkeypatched the distutils - first. - """ - external_bases = ( - cls - for cls in _get_mro(cls) - if not cls.__module__.startswith('setuptools') - ) - base = next(external_bases) - if not base.__module__.startswith('distutils'): - msg = "distutils has already been patched by %r" % cls - raise AssertionError(msg) - return base - - -def patch_all(): - # we can't patch distutils.cmd, alas - distutils.core.Command = setuptools.Command - - has_issue_12885 = sys.version_info <= (3, 5, 3) - - if has_issue_12885: - # fix findall bug in distutils (http://bugs.python.org/issue12885) - distutils.filelist.findall = setuptools.findall - - needs_warehouse = ( - sys.version_info < (2, 7, 13) - or - (3, 0) < sys.version_info < (3, 3, 7) - or - (3, 4) < sys.version_info < (3, 4, 6) - or - (3, 5) < sys.version_info <= (3, 5, 3) - ) - - if needs_warehouse: - warehouse = 'https://upload.pypi.org/legacy/' - distutils.config.PyPIRCCommand.DEFAULT_REPOSITORY = warehouse - - _patch_distribution_metadata_write_pkg_file() - _patch_distribution_metadata_write_pkg_info() - - # Install Distribution throughout the distutils - for module in distutils.dist, distutils.core, distutils.cmd: - module.Distribution = setuptools.dist.Distribution - - # Install the patched Extension - distutils.core.Extension = setuptools.extension.Extension - distutils.extension.Extension = setuptools.extension.Extension - if 'distutils.command.build_ext' in sys.modules: - sys.modules['distutils.command.build_ext'].Extension = ( - setuptools.extension.Extension - ) - - patch_for_msvc_specialized_compiler() - - -def _patch_distribution_metadata_write_pkg_file(): - """Patch write_pkg_file to also write Requires-Python/Requires-External""" - distutils.dist.DistributionMetadata.write_pkg_file = ( - setuptools.dist.write_pkg_file - ) - - -def _patch_distribution_metadata_write_pkg_info(): - """ - Workaround issue #197 - Python 3 prior to 3.2.2 uses an environment-local - encoding to save the pkg_info. Monkey-patch its write_pkg_info method to - correct this undesirable behavior. - """ - environment_local = (3,) <= sys.version_info[:3] < (3, 2, 2) - if not environment_local: - return - - distutils.dist.DistributionMetadata.write_pkg_info = ( - setuptools.dist.write_pkg_info - ) - - -def patch_func(replacement, target_mod, func_name): - """ - Patch func_name in target_mod with replacement - - Important - original must be resolved by name to avoid - patching an already patched function. - """ - original = getattr(target_mod, func_name) - - # set the 'unpatched' attribute on the replacement to - # point to the original. 
- vars(replacement).setdefault('unpatched', original) - - # replace the function in the original module - setattr(target_mod, func_name, replacement) - - -def get_unpatched_function(candidate): - return getattr(candidate, 'unpatched') - - -def patch_for_msvc_specialized_compiler(): - """ - Patch functions in distutils to use standalone Microsoft Visual C++ - compilers. - """ - # import late to avoid circular imports on Python < 3.5 - msvc = import_module('setuptools.msvc') - - if platform.system() != 'Windows': - # Compilers only availables on Microsoft Windows - return - - def patch_params(mod_name, func_name): - """ - Prepare the parameters for patch_func to patch indicated function. - """ - repl_prefix = 'msvc9_' if 'msvc9' in mod_name else 'msvc14_' - repl_name = repl_prefix + func_name.lstrip('_') - repl = getattr(msvc, repl_name) - mod = import_module(mod_name) - if not hasattr(mod, func_name): - raise ImportError(func_name) - return repl, mod, func_name - - # Python 2.7 to 3.4 - msvc9 = functools.partial(patch_params, 'distutils.msvc9compiler') - - # Python 3.5+ - msvc14 = functools.partial(patch_params, 'distutils._msvccompiler') - - try: - # Patch distutils.msvc9compiler - patch_func(*msvc9('find_vcvarsall')) - patch_func(*msvc9('query_vcvarsall')) - except ImportError: - pass - - try: - # Patch distutils._msvccompiler._get_vc_env - patch_func(*msvc14('_get_vc_env')) - except ImportError: - pass - - try: - # Patch distutils._msvccompiler.gen_lib_options for Numpy - patch_func(*msvc14('gen_lib_options')) - except ImportError: - pass diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/msvc.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/msvc.py deleted file mode 100644 index 5e20b3f..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/msvc.py +++ /dev/null @@ -1,1302 +0,0 @@ -""" -Improved support for Microsoft Visual C++ compilers. - -Known supported compilers: --------------------------- -Microsoft Visual C++ 9.0: - Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64) - Microsoft Windows SDK 6.1 (x86, x64, ia64) - Microsoft Windows SDK 7.0 (x86, x64, ia64) - -Microsoft Visual C++ 10.0: - Microsoft Windows SDK 7.1 (x86, x64, ia64) - -Microsoft Visual C++ 14.0: - Microsoft Visual C++ Build Tools 2015 (x86, x64, arm) - Microsoft Visual Studio 2017 (x86, x64, arm, arm64) - Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64) -""" - -import os -import sys -import platform -import itertools -import distutils.errors -from setuptools.extern.packaging.version import LegacyVersion - -from setuptools.extern.six.moves import filterfalse - -from .monkey import get_unpatched - -if platform.system() == 'Windows': - from setuptools.extern.six.moves import winreg - safe_env = os.environ -else: - """ - Mock winreg and environ so the module can be imported - on this platform. - """ - - class winreg: - HKEY_USERS = None - HKEY_CURRENT_USER = None - HKEY_LOCAL_MACHINE = None - HKEY_CLASSES_ROOT = None - - safe_env = dict() - -_msvc9_suppress_errors = ( - # msvc9compiler isn't available on some platforms - ImportError, - - # msvc9compiler raises DistutilsPlatformError in some - # environments. See #1118. - distutils.errors.DistutilsPlatformError, -) - -try: - from distutils.msvc9compiler import Reg -except _msvc9_suppress_errors: - pass - - -def msvc9_find_vcvarsall(version): - """ - Patched "distutils.msvc9compiler.find_vcvarsall" to use the standalone - compiler build for Python (VCForPython). 
Fall back to original behavior - when the standalone compiler is not available. - - Redirect the path of "vcvarsall.bat". - - Known supported compilers - ------------------------- - Microsoft Visual C++ 9.0: - Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64) - - Parameters - ---------- - version: float - Required Microsoft Visual C++ version. - - Return - ------ - vcvarsall.bat path: str - """ - VC_BASE = r'Software\%sMicrosoft\DevDiv\VCForPython\%0.1f' - key = VC_BASE % ('', version) - try: - # Per-user installs register the compiler path here - productdir = Reg.get_value(key, "installdir") - except KeyError: - try: - # All-user installs on a 64-bit system register here - key = VC_BASE % ('Wow6432Node\\', version) - productdir = Reg.get_value(key, "installdir") - except KeyError: - productdir = None - - if productdir: - vcvarsall = os.path.os.path.join(productdir, "vcvarsall.bat") - if os.path.isfile(vcvarsall): - return vcvarsall - - return get_unpatched(msvc9_find_vcvarsall)(version) - - -def msvc9_query_vcvarsall(ver, arch='x86', *args, **kwargs): - """ - Patched "distutils.msvc9compiler.query_vcvarsall" for support extra - compilers. - - Set environment without use of "vcvarsall.bat". - - Known supported compilers - ------------------------- - Microsoft Visual C++ 9.0: - Microsoft Visual C++ Compiler for Python 2.7 (x86, amd64) - Microsoft Windows SDK 6.1 (x86, x64, ia64) - Microsoft Windows SDK 7.0 (x86, x64, ia64) - - Microsoft Visual C++ 10.0: - Microsoft Windows SDK 7.1 (x86, x64, ia64) - - Parameters - ---------- - ver: float - Required Microsoft Visual C++ version. - arch: str - Target architecture. - - Return - ------ - environment: dict - """ - # Try to get environement from vcvarsall.bat (Classical way) - try: - orig = get_unpatched(msvc9_query_vcvarsall) - return orig(ver, arch, *args, **kwargs) - except distutils.errors.DistutilsPlatformError: - # Pass error if Vcvarsall.bat is missing - pass - except ValueError: - # Pass error if environment not set after executing vcvarsall.bat - pass - - # If error, try to set environment directly - try: - return EnvironmentInfo(arch, ver).return_env() - except distutils.errors.DistutilsPlatformError as exc: - _augment_exception(exc, ver, arch) - raise - - -def msvc14_get_vc_env(plat_spec): - """ - Patched "distutils._msvccompiler._get_vc_env" for support extra - compilers. - - Set environment without use of "vcvarsall.bat". - - Known supported compilers - ------------------------- - Microsoft Visual C++ 14.0: - Microsoft Visual C++ Build Tools 2015 (x86, x64, arm) - Microsoft Visual Studio 2017 (x86, x64, arm, arm64) - Microsoft Visual Studio Build Tools 2017 (x86, x64, arm, arm64) - - Parameters - ---------- - plat_spec: str - Target architecture. 
- - Return - ------ - environment: dict - """ - # Try to get environment from vcvarsall.bat (Classical way) - try: - return get_unpatched(msvc14_get_vc_env)(plat_spec) - except distutils.errors.DistutilsPlatformError: - # Pass error Vcvarsall.bat is missing - pass - - # If error, try to set environment directly - try: - return EnvironmentInfo(plat_spec, vc_min_ver=14.0).return_env() - except distutils.errors.DistutilsPlatformError as exc: - _augment_exception(exc, 14.0) - raise - - -def msvc14_gen_lib_options(*args, **kwargs): - """ - Patched "distutils._msvccompiler.gen_lib_options" for fix - compatibility between "numpy.distutils" and "distutils._msvccompiler" - (for Numpy < 1.11.2) - """ - if "numpy.distutils" in sys.modules: - import numpy as np - if LegacyVersion(np.__version__) < LegacyVersion('1.11.2'): - return np.distutils.ccompiler.gen_lib_options(*args, **kwargs) - return get_unpatched(msvc14_gen_lib_options)(*args, **kwargs) - - -def _augment_exception(exc, version, arch=''): - """ - Add details to the exception message to help guide the user - as to what action will resolve it. - """ - # Error if MSVC++ directory not found or environment not set - message = exc.args[0] - - if "vcvarsall" in message.lower() or "visual c" in message.lower(): - # Special error message if MSVC++ not installed - tmpl = 'Microsoft Visual C++ {version:0.1f} is required.' - message = tmpl.format(**locals()) - msdownload = 'www.microsoft.com/download/details.aspx?id=%d' - if version == 9.0: - if arch.lower().find('ia64') > -1: - # For VC++ 9.0, if IA64 support is needed, redirect user - # to Windows SDK 7.0 - message += ' Get it with "Microsoft Windows SDK 7.0": ' - message += msdownload % 3138 - else: - # For VC++ 9.0 redirect user to Vc++ for Python 2.7 : - # This redirection link is maintained by Microsoft. - # Contact vspython@microsoft.com if it needs updating. - message += ' Get it from http://aka.ms/vcpython27' - elif version == 10.0: - # For VC++ 10.0 Redirect user to Windows SDK 7.1 - message += ' Get it with "Microsoft Windows SDK 7.1": ' - message += msdownload % 8279 - elif version >= 14.0: - # For VC++ 14.0 Redirect user to Visual C++ Build Tools - message += (' Get it with "Microsoft Visual C++ Build Tools": ' - r'http://landinghub.visualstudio.com/' - 'visual-cpp-build-tools') - - exc.args = (message, ) - - -class PlatformInfo: - """ - Current and Target Architectures informations. - - Parameters - ---------- - arch: str - Target architecture. - """ - current_cpu = safe_env.get('processor_architecture', '').lower() - - def __init__(self, arch): - self.arch = arch.lower().replace('x64', 'amd64') - - @property - def target_cpu(self): - return self.arch[self.arch.find('_') + 1:] - - def target_is_x86(self): - return self.target_cpu == 'x86' - - def current_is_x86(self): - return self.current_cpu == 'x86' - - def current_dir(self, hidex86=False, x64=False): - """ - Current platform specific subfolder. - - Parameters - ---------- - hidex86: bool - return '' and not '\x86' if architecture is x86. - x64: bool - return '\x64' and not '\amd64' if architecture is amd64. - - Return - ------ - subfolder: str - '\target', or '' (see hidex86 parameter) - """ - return ( - '' if (self.current_cpu == 'x86' and hidex86) else - r'\x64' if (self.current_cpu == 'amd64' and x64) else - r'\%s' % self.current_cpu - ) - - def target_dir(self, hidex86=False, x64=False): - r""" - Target platform specific subfolder. - - Parameters - ---------- - hidex86: bool - return '' and not '\x86' if architecture is x86. 
- x64: bool - return '\x64' and not '\amd64' if architecture is amd64. - - Return - ------ - subfolder: str - '\current', or '' (see hidex86 parameter) - """ - return ( - '' if (self.target_cpu == 'x86' and hidex86) else - r'\x64' if (self.target_cpu == 'amd64' and x64) else - r'\%s' % self.target_cpu - ) - - def cross_dir(self, forcex86=False): - r""" - Cross platform specific subfolder. - - Parameters - ---------- - forcex86: bool - Use 'x86' as current architecture even if current architecture is - not x86. - - Return - ------ - subfolder: str - '' if target architecture is current architecture, - '\current_target' if not. - """ - current = 'x86' if forcex86 else self.current_cpu - return ( - '' if self.target_cpu == current else - self.target_dir().replace('\\', '\\%s_' % current) - ) - - -class RegistryInfo: - """ - Microsoft Visual Studio related registry information. - - Parameters - ---------- - platform_info: PlatformInfo - "PlatformInfo" instance. - """ - HKEYS = (winreg.HKEY_USERS, - winreg.HKEY_CURRENT_USER, - winreg.HKEY_LOCAL_MACHINE, - winreg.HKEY_CLASSES_ROOT) - - def __init__(self, platform_info): - self.pi = platform_info - - @property - def visualstudio(self): - """ - Microsoft Visual Studio root registry key. - """ - return 'VisualStudio' - - @property - def sxs(self): - """ - Microsoft Visual Studio SxS registry key. - """ - return os.path.join(self.visualstudio, 'SxS') - - @property - def vc(self): - """ - Microsoft Visual C++ VC7 registry key. - """ - return os.path.join(self.sxs, 'VC7') - - @property - def vs(self): - """ - Microsoft Visual Studio VS7 registry key. - """ - return os.path.join(self.sxs, 'VS7') - - @property - def vc_for_python(self): - """ - Microsoft Visual C++ for Python registry key. - """ - return r'DevDiv\VCForPython' - - @property - def microsoft_sdk(self): - """ - Microsoft SDK registry key. - """ - return 'Microsoft SDKs' - - @property - def windows_sdk(self): - """ - Microsoft Windows/Platform SDK registry key. - """ - return os.path.join(self.microsoft_sdk, 'Windows') - - @property - def netfx_sdk(self): - """ - Microsoft .NET Framework SDK registry key. - """ - return os.path.join(self.microsoft_sdk, 'NETFXSDK') - - @property - def windows_kits_roots(self): - """ - Microsoft Windows Kits Roots registry key. - """ - return r'Windows Kits\Installed Roots' - - def microsoft(self, key, x86=False): - """ - Return key in Microsoft software registry. - - Parameters - ---------- - key: str - Registry key path where to look. - x86: bool - Force x86 software registry. - - Return - ------ - str: value - """ - node64 = '' if self.pi.current_is_x86() or x86 else 'Wow6432Node' - return os.path.join('Software', node64, 'Microsoft', key) - - def lookup(self, key, name): - """ - Look for values in registry in Microsoft software registry. - - Parameters - ---------- - key: str - Registry key path where to look. - name: str - Value name to find. - - Return - ------ - str: value - """ - KEY_READ = winreg.KEY_READ - openkey = winreg.OpenKey - ms = self.microsoft - for hkey in self.HKEYS: - try: - bkey = openkey(hkey, ms(key), 0, KEY_READ) - except (OSError, IOError): - if not self.pi.current_is_x86(): - try: - bkey = openkey(hkey, ms(key, True), 0, KEY_READ) - except (OSError, IOError): - continue - else: - continue - try: - return winreg.QueryValueEx(bkey, name)[0] - except (OSError, IOError): - pass - - -class SystemInfo: - """ - Microsoft Windows and Visual Studio related system information.
- - Parameters - ---------- - registry_info: RegistryInfo - "RegistryInfo" instance. - vc_ver: float - Required Microsoft Visual C++ version. - """ - - # Variables and properties in this class use originals CamelCase variables - # names from Microsoft source files for more easy comparaison. - WinDir = safe_env.get('WinDir', '') - ProgramFiles = safe_env.get('ProgramFiles', '') - ProgramFilesx86 = safe_env.get('ProgramFiles(x86)', ProgramFiles) - - def __init__(self, registry_info, vc_ver=None): - self.ri = registry_info - self.pi = self.ri.pi - self.vc_ver = vc_ver or self._find_latest_available_vc_ver() - - def _find_latest_available_vc_ver(self): - try: - return self.find_available_vc_vers()[-1] - except IndexError: - err = 'No Microsoft Visual C++ version found' - raise distutils.errors.DistutilsPlatformError(err) - - def find_available_vc_vers(self): - """ - Find all available Microsoft Visual C++ versions. - """ - ms = self.ri.microsoft - vckeys = (self.ri.vc, self.ri.vc_for_python, self.ri.vs) - vc_vers = [] - for hkey in self.ri.HKEYS: - for key in vckeys: - try: - bkey = winreg.OpenKey(hkey, ms(key), 0, winreg.KEY_READ) - except (OSError, IOError): - continue - subkeys, values, _ = winreg.QueryInfoKey(bkey) - for i in range(values): - try: - ver = float(winreg.EnumValue(bkey, i)[0]) - if ver not in vc_vers: - vc_vers.append(ver) - except ValueError: - pass - for i in range(subkeys): - try: - ver = float(winreg.EnumKey(bkey, i)) - if ver not in vc_vers: - vc_vers.append(ver) - except ValueError: - pass - return sorted(vc_vers) - - @property - def VSInstallDir(self): - """ - Microsoft Visual Studio directory. - """ - # Default path - name = 'Microsoft Visual Studio %0.1f' % self.vc_ver - default = os.path.join(self.ProgramFilesx86, name) - - # Try to get path from registry, if fail use default path - return self.ri.lookup(self.ri.vs, '%0.1f' % self.vc_ver) or default - - @property - def VCInstallDir(self): - """ - Microsoft Visual C++ directory. - """ - self.VSInstallDir - - guess_vc = self._guess_vc() or self._guess_vc_legacy() - - # Try to get "VC++ for Python" path from registry as default path - reg_path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver) - python_vc = self.ri.lookup(reg_path, 'installdir') - default_vc = os.path.join(python_vc, 'VC') if python_vc else guess_vc - - # Try to get path from registry, if fail use default path - path = self.ri.lookup(self.ri.vc, '%0.1f' % self.vc_ver) or default_vc - - if not os.path.isdir(path): - msg = 'Microsoft Visual C++ directory not found' - raise distutils.errors.DistutilsPlatformError(msg) - - return path - - def _guess_vc(self): - """ - Locate Visual C for 2017 - """ - if self.vc_ver <= 14.0: - return - - default = r'VC\Tools\MSVC' - guess_vc = os.path.join(self.VSInstallDir, default) - # Subdir with VC exact version as name - try: - vc_exact_ver = os.listdir(guess_vc)[-1] - return os.path.join(guess_vc, vc_exact_ver) - except (OSError, IOError, IndexError): - pass - - def _guess_vc_legacy(self): - """ - Locate Visual C for versions prior to 2017 - """ - default = r'Microsoft Visual Studio %0.1f\VC' % self.vc_ver - return os.path.join(self.ProgramFilesx86, default) - - @property - def WindowsSdkVersion(self): - """ - Microsoft Windows SDK versions for specified MSVC++ version. 
- """ - if self.vc_ver <= 9.0: - return ('7.0', '6.1', '6.0a') - elif self.vc_ver == 10.0: - return ('7.1', '7.0a') - elif self.vc_ver == 11.0: - return ('8.0', '8.0a') - elif self.vc_ver == 12.0: - return ('8.1', '8.1a') - elif self.vc_ver >= 14.0: - return ('10.0', '8.1') - - @property - def WindowsSdkLastVersion(self): - """ - Microsoft Windows SDK last version - """ - return self._use_last_dir_name(os.path.join( - self.WindowsSdkDir, 'lib')) - - @property - def WindowsSdkDir(self): - """ - Microsoft Windows SDK directory. - """ - sdkdir = '' - for ver in self.WindowsSdkVersion: - # Try to get it from registry - loc = os.path.join(self.ri.windows_sdk, 'v%s' % ver) - sdkdir = self.ri.lookup(loc, 'installationfolder') - if sdkdir: - break - if not sdkdir or not os.path.isdir(sdkdir): - # Try to get "VC++ for Python" version from registry - path = os.path.join(self.ri.vc_for_python, '%0.1f' % self.vc_ver) - install_base = self.ri.lookup(path, 'installdir') - if install_base: - sdkdir = os.path.join(install_base, 'WinSDK') - if not sdkdir or not os.path.isdir(sdkdir): - # If fail, use default new path - for ver in self.WindowsSdkVersion: - intver = ver[:ver.rfind('.')] - path = r'Microsoft SDKs\Windows Kits\%s' % (intver) - d = os.path.join(self.ProgramFiles, path) - if os.path.isdir(d): - sdkdir = d - if not sdkdir or not os.path.isdir(sdkdir): - # If fail, use default old path - for ver in self.WindowsSdkVersion: - path = r'Microsoft SDKs\Windows\v%s' % ver - d = os.path.join(self.ProgramFiles, path) - if os.path.isdir(d): - sdkdir = d - if not sdkdir: - # If fail, use Platform SDK - sdkdir = os.path.join(self.VCInstallDir, 'PlatformSDK') - return sdkdir - - @property - def WindowsSDKExecutablePath(self): - """ - Microsoft Windows SDK executable directory. - """ - # Find WinSDK NetFx Tools registry dir name - if self.vc_ver <= 11.0: - netfxver = 35 - arch = '' - else: - netfxver = 40 - hidex86 = True if self.vc_ver <= 12.0 else False - arch = self.pi.current_dir(x64=True, hidex86=hidex86) - fx = 'WinSDK-NetFx%dTools%s' % (netfxver, arch.replace('\\', '-')) - - # liste all possibles registry paths - regpaths = [] - if self.vc_ver >= 14.0: - for ver in self.NetFxSdkVersion: - regpaths += [os.path.join(self.ri.netfx_sdk, ver, fx)] - - for ver in self.WindowsSdkVersion: - regpaths += [os.path.join(self.ri.windows_sdk, 'v%sA' % ver, fx)] - - # Return installation folder from the more recent path - for path in regpaths: - execpath = self.ri.lookup(path, 'installationfolder') - if execpath: - break - return execpath - - @property - def FSharpInstallDir(self): - """ - Microsoft Visual F# directory. - """ - path = r'%0.1f\Setup\F#' % self.vc_ver - path = os.path.join(self.ri.visualstudio, path) - return self.ri.lookup(path, 'productdir') or '' - - @property - def UniversalCRTSdkDir(self): - """ - Microsoft Universal CRT SDK directory. - """ - # Set Kit Roots versions for specified MSVC++ version - if self.vc_ver >= 14.0: - vers = ('10', '81') - else: - vers = () - - # Find path of the more recent Kit - for ver in vers: - sdkdir = self.ri.lookup(self.ri.windows_kits_roots, - 'kitsroot%s' % ver) - if sdkdir: - break - return sdkdir or '' - - @property - def UniversalCRTSdkLastVersion(self): - """ - Microsoft Universal C Runtime SDK last version - """ - return self._use_last_dir_name(os.path.join( - self.UniversalCRTSdkDir, 'lib')) - - @property - def NetFxSdkVersion(self): - """ - Microsoft .NET Framework SDK versions. 
- """ - # Set FxSdk versions for specified MSVC++ version - if self.vc_ver >= 14.0: - return ('4.6.1', '4.6') - else: - return () - - @property - def NetFxSdkDir(self): - """ - Microsoft .NET Framework SDK directory. - """ - for ver in self.NetFxSdkVersion: - loc = os.path.join(self.ri.netfx_sdk, ver) - sdkdir = self.ri.lookup(loc, 'kitsinstallationfolder') - if sdkdir: - break - return sdkdir or '' - - @property - def FrameworkDir32(self): - """ - Microsoft .NET Framework 32bit directory. - """ - # Default path - guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework') - - # Try to get path from registry, if fail use default path - return self.ri.lookup(self.ri.vc, 'frameworkdir32') or guess_fw - - @property - def FrameworkDir64(self): - """ - Microsoft .NET Framework 64bit directory. - """ - # Default path - guess_fw = os.path.join(self.WinDir, r'Microsoft.NET\Framework64') - - # Try to get path from registry, if fail use default path - return self.ri.lookup(self.ri.vc, 'frameworkdir64') or guess_fw - - @property - def FrameworkVersion32(self): - """ - Microsoft .NET Framework 32bit versions. - """ - return self._find_dot_net_versions(32) - - @property - def FrameworkVersion64(self): - """ - Microsoft .NET Framework 64bit versions. - """ - return self._find_dot_net_versions(64) - - def _find_dot_net_versions(self, bits): - """ - Find Microsoft .NET Framework versions. - - Parameters - ---------- - bits: int - Platform number of bits: 32 or 64. - """ - # Find actual .NET version in registry - reg_ver = self.ri.lookup(self.ri.vc, 'frameworkver%d' % bits) - dot_net_dir = getattr(self, 'FrameworkDir%d' % bits) - ver = reg_ver or self._use_last_dir_name(dot_net_dir, 'v') or '' - - # Set .NET versions for specified MSVC++ version - if self.vc_ver >= 12.0: - frameworkver = (ver, 'v4.0') - elif self.vc_ver >= 10.0: - frameworkver = ('v4.0.30319' if ver.lower()[:2] != 'v4' else ver, - 'v3.5') - elif self.vc_ver == 9.0: - frameworkver = ('v3.5', 'v2.0.50727') - if self.vc_ver == 8.0: - frameworkver = ('v3.0', 'v2.0.50727') - return frameworkver - - def _use_last_dir_name(self, path, prefix=''): - """ - Return name of the last dir in path or '' if no dir found. - - Parameters - ---------- - path: str - Use dirs in this path - prefix: str - Use only dirs startings by this prefix - """ - matching_dirs = ( - dir_name - for dir_name in reversed(os.listdir(path)) - if os.path.isdir(os.path.join(path, dir_name)) and - dir_name.startswith(prefix) - ) - return next(matching_dirs, None) or '' - - -class EnvironmentInfo: - """ - Return environment variables for specified Microsoft Visual C++ version - and platform : Lib, Include, Path and libpath. - - This function is compatible with Microsoft Visual C++ 9.0 to 14.0. - - Script created by analysing Microsoft environment configuration files like - "vcvars[...].bat", "SetEnv.Cmd", "vcbuildtools.bat", ... - - Parameters - ---------- - arch: str - Target architecture. - vc_ver: float - Required Microsoft Visual C++ version. If not set, autodetect the last - version. - vc_min_ver: float - Minimum Microsoft Visual C++ version. - """ - - # Variables and properties in this class use originals CamelCase variables - # names from Microsoft source files for more easy comparaison. 
- - def __init__(self, arch, vc_ver=None, vc_min_ver=0): - self.pi = PlatformInfo(arch) - self.ri = RegistryInfo(self.pi) - self.si = SystemInfo(self.ri, vc_ver) - - if self.vc_ver < vc_min_ver: - err = 'No suitable Microsoft Visual C++ version found' - raise distutils.errors.DistutilsPlatformError(err) - - @property - def vc_ver(self): - """ - Microsoft Visual C++ version. - """ - return self.si.vc_ver - - @property - def VSTools(self): - """ - Microsoft Visual Studio Tools - """ - paths = [r'Common7\IDE', r'Common7\Tools'] - - if self.vc_ver >= 14.0: - arch_subdir = self.pi.current_dir(hidex86=True, x64=True) - paths += [r'Common7\IDE\CommonExtensions\Microsoft\TestWindow'] - paths += [r'Team Tools\Performance Tools'] - paths += [r'Team Tools\Performance Tools%s' % arch_subdir] - - return [os.path.join(self.si.VSInstallDir, path) for path in paths] - - @property - def VCIncludes(self): - """ - Microsoft Visual C++ & Microsoft Foundation Class Includes - """ - return [os.path.join(self.si.VCInstallDir, 'Include'), - os.path.join(self.si.VCInstallDir, r'ATLMFC\Include')] - - @property - def VCLibraries(self): - """ - Microsoft Visual C++ & Microsoft Foundation Class Libraries - """ - if self.vc_ver >= 15.0: - arch_subdir = self.pi.target_dir(x64=True) - else: - arch_subdir = self.pi.target_dir(hidex86=True) - paths = ['Lib%s' % arch_subdir, r'ATLMFC\Lib%s' % arch_subdir] - - if self.vc_ver >= 14.0: - paths += [r'Lib\store%s' % arch_subdir] - - return [os.path.join(self.si.VCInstallDir, path) for path in paths] - - @property - def VCStoreRefs(self): - """ - Microsoft Visual C++ store references Libraries - """ - if self.vc_ver < 14.0: - return [] - return [os.path.join(self.si.VCInstallDir, r'Lib\store\references')] - - @property - def VCTools(self): - """ - Microsoft Visual C++ Tools - """ - si = self.si - tools = [os.path.join(si.VCInstallDir, 'VCPackages')] - - forcex86 = True if self.vc_ver <= 10.0 else False - arch_subdir = self.pi.cross_dir(forcex86) - if arch_subdir: - tools += [os.path.join(si.VCInstallDir, 'Bin%s' % arch_subdir)] - - if self.vc_ver == 14.0: - path = 'Bin%s' % self.pi.current_dir(hidex86=True) - tools += [os.path.join(si.VCInstallDir, path)] - - elif self.vc_ver >= 15.0: - host_dir = (r'bin\HostX86%s' if self.pi.current_is_x86() else - r'bin\HostX64%s') - tools += [os.path.join( - si.VCInstallDir, host_dir % self.pi.target_dir(x64=True))] - - if self.pi.current_cpu != self.pi.target_cpu: - tools += [os.path.join( - si.VCInstallDir, host_dir % self.pi.current_dir(x64=True))] - - else: - tools += [os.path.join(si.VCInstallDir, 'Bin')] - - return tools - - @property - def OSLibraries(self): - """ - Microsoft Windows SDK Libraries - """ - if self.vc_ver <= 10.0: - arch_subdir = self.pi.target_dir(hidex86=True, x64=True) - return [os.path.join(self.si.WindowsSdkDir, 'Lib%s' % arch_subdir)] - - else: - arch_subdir = self.pi.target_dir(x64=True) - lib = os.path.join(self.si.WindowsSdkDir, 'lib') - libver = self._sdk_subdir - return [os.path.join(lib, '%sum%s' % (libver , arch_subdir))] - - @property - def OSIncludes(self): - """ - Microsoft Windows SDK Include - """ - include = os.path.join(self.si.WindowsSdkDir, 'include') - - if self.vc_ver <= 10.0: - return [include, os.path.join(include, 'gl')] - - else: - if self.vc_ver >= 14.0: - sdkver = self._sdk_subdir - else: - sdkver = '' - return [os.path.join(include, '%sshared' % sdkver), - os.path.join(include, '%sum' % sdkver), - os.path.join(include, '%swinrt' % sdkver)] - - @property - def OSLibpath(self): - """ - 
Microsoft Windows SDK Libraries Paths - """ - ref = os.path.join(self.si.WindowsSdkDir, 'References') - libpath = [] - - if self.vc_ver <= 9.0: - libpath += self.OSLibraries - - if self.vc_ver >= 11.0: - libpath += [os.path.join(ref, r'CommonConfiguration\Neutral')] - - if self.vc_ver >= 14.0: - libpath += [ - ref, - os.path.join(self.si.WindowsSdkDir, 'UnionMetadata'), - os.path.join( - ref, - 'Windows.Foundation.UniversalApiContract', - '1.0.0.0', - ), - os.path.join( - ref, - 'Windows.Foundation.FoundationContract', - '1.0.0.0', - ), - os.path.join( - ref, - 'Windows.Networking.Connectivity.WwanContract', - '1.0.0.0', - ), - os.path.join( - self.si.WindowsSdkDir, - 'ExtensionSDKs', - 'Microsoft.VCLibs', - '%0.1f' % self.vc_ver, - 'References', - 'CommonConfiguration', - 'neutral', - ), - ] - return libpath - - @property - def SdkTools(self): - """ - Microsoft Windows SDK Tools - """ - return list(self._sdk_tools()) - - def _sdk_tools(self): - """ - Microsoft Windows SDK Tools paths generator - """ - if self.vc_ver < 15.0: - bin_dir = 'Bin' if self.vc_ver <= 11.0 else r'Bin\x86' - yield os.path.join(self.si.WindowsSdkDir, bin_dir) - - if not self.pi.current_is_x86(): - arch_subdir = self.pi.current_dir(x64=True) - path = 'Bin%s' % arch_subdir - yield os.path.join(self.si.WindowsSdkDir, path) - - if self.vc_ver == 10.0 or self.vc_ver == 11.0: - if self.pi.target_is_x86(): - arch_subdir = '' - else: - arch_subdir = self.pi.current_dir(hidex86=True, x64=True) - path = r'Bin\NETFX 4.0 Tools%s' % arch_subdir - yield os.path.join(self.si.WindowsSdkDir, path) - - elif self.vc_ver >= 15.0: - path = os.path.join(self.si.WindowsSdkDir, 'Bin') - arch_subdir = self.pi.current_dir(x64=True) - sdkver = self.si.WindowsSdkLastVersion - yield os.path.join(path, '%s%s' % (sdkver, arch_subdir)) - - if self.si.WindowsSDKExecutablePath: - yield self.si.WindowsSDKExecutablePath - - @property - def _sdk_subdir(self): - """ - Microsoft Windows SDK version subdir - """ - ucrtver = self.si.WindowsSdkLastVersion - return ('%s\\' % ucrtver) if ucrtver else '' - - @property - def SdkSetup(self): - """ - Microsoft Windows SDK Setup - """ - if self.vc_ver > 9.0: - return [] - - return [os.path.join(self.si.WindowsSdkDir, 'Setup')] - - @property - def FxTools(self): - """ - Microsoft .NET Framework Tools - """ - pi = self.pi - si = self.si - - if self.vc_ver <= 10.0: - include32 = True - include64 = not pi.target_is_x86() and not pi.current_is_x86() - else: - include32 = pi.target_is_x86() or pi.current_is_x86() - include64 = pi.current_cpu == 'amd64' or pi.target_cpu == 'amd64' - - tools = [] - if include32: - tools += [os.path.join(si.FrameworkDir32, ver) - for ver in si.FrameworkVersion32] - if include64: - tools += [os.path.join(si.FrameworkDir64, ver) - for ver in si.FrameworkVersion64] - return tools - - @property - def NetFxSDKLibraries(self): - """ - Microsoft .Net Framework SDK Libraries - """ - if self.vc_ver < 14.0 or not self.si.NetFxSdkDir: - return [] - - arch_subdir = self.pi.target_dir(x64=True) - return [os.path.join(self.si.NetFxSdkDir, r'lib\um%s' % arch_subdir)] - - @property - def NetFxSDKIncludes(self): - """ - Microsoft .Net Framework SDK Includes - """ - if self.vc_ver < 14.0 or not self.si.NetFxSdkDir: - return [] - - return [os.path.join(self.si.NetFxSdkDir, r'include\um')] - - @property - def VsTDb(self): - """ - Microsoft Visual Studio Team System Database - """ - return [os.path.join(self.si.VSInstallDir, r'VSTSDB\Deploy')] - - @property - def MSBuild(self): - """ - Microsoft Build Engine - 
""" - if self.vc_ver < 12.0: - return [] - elif self.vc_ver < 15.0: - base_path = self.si.ProgramFilesx86 - arch_subdir = self.pi.current_dir(hidex86=True) - else: - base_path = self.si.VSInstallDir - arch_subdir = '' - - path = r'MSBuild\%0.1f\bin%s' % (self.vc_ver, arch_subdir) - build = [os.path.join(base_path, path)] - - if self.vc_ver >= 15.0: - # Add Roslyn C# & Visual Basic Compiler - build += [os.path.join(base_path, path, 'Roslyn')] - - return build - - @property - def HTMLHelpWorkshop(self): - """ - Microsoft HTML Help Workshop - """ - if self.vc_ver < 11.0: - return [] - - return [os.path.join(self.si.ProgramFilesx86, 'HTML Help Workshop')] - - @property - def UCRTLibraries(self): - """ - Microsoft Universal C Runtime SDK Libraries - """ - if self.vc_ver < 14.0: - return [] - - arch_subdir = self.pi.target_dir(x64=True) - lib = os.path.join(self.si.UniversalCRTSdkDir, 'lib') - ucrtver = self._ucrt_subdir - return [os.path.join(lib, '%sucrt%s' % (ucrtver, arch_subdir))] - - @property - def UCRTIncludes(self): - """ - Microsoft Universal C Runtime SDK Include - """ - if self.vc_ver < 14.0: - return [] - - include = os.path.join(self.si.UniversalCRTSdkDir, 'include') - return [os.path.join(include, '%sucrt' % self._ucrt_subdir)] - - @property - def _ucrt_subdir(self): - """ - Microsoft Universal C Runtime SDK version subdir - """ - ucrtver = self.si.UniversalCRTSdkLastVersion - return ('%s\\' % ucrtver) if ucrtver else '' - - @property - def FSharp(self): - """ - Microsoft Visual F# - """ - if self.vc_ver < 11.0 and self.vc_ver > 12.0: - return [] - - return self.si.FSharpInstallDir - - @property - def VCRuntimeRedist(self): - """ - Microsoft Visual C++ runtime redistribuable dll - """ - arch_subdir = self.pi.target_dir(x64=True) - if self.vc_ver < 15: - redist_path = self.si.VCInstallDir - vcruntime = 'redist%s\\Microsoft.VC%d0.CRT\\vcruntime%d0.dll' - else: - redist_path = self.si.VCInstallDir.replace('\\Tools', '\\Redist') - vcruntime = 'onecore%s\\Microsoft.VC%d0.CRT\\vcruntime%d0.dll' - - # Visual Studio 2017 is still Visual C++ 14.0 - dll_ver = 14.0 if self.vc_ver == 15 else self.vc_ver - - vcruntime = vcruntime % (arch_subdir, self.vc_ver, dll_ver) - return os.path.join(redist_path, vcruntime) - - def return_env(self, exists=True): - """ - Return environment dict. - - Parameters - ---------- - exists: bool - It True, only return existing paths. - """ - env = dict( - include=self._build_paths('include', - [self.VCIncludes, - self.OSIncludes, - self.UCRTIncludes, - self.NetFxSDKIncludes], - exists), - lib=self._build_paths('lib', - [self.VCLibraries, - self.OSLibraries, - self.FxTools, - self.UCRTLibraries, - self.NetFxSDKLibraries], - exists), - libpath=self._build_paths('libpath', - [self.VCLibraries, - self.FxTools, - self.VCStoreRefs, - self.OSLibpath], - exists), - path=self._build_paths('path', - [self.VCTools, - self.VSTools, - self.VsTDb, - self.SdkTools, - self.SdkSetup, - self.FxTools, - self.MSBuild, - self.HTMLHelpWorkshop, - self.FSharp], - exists), - ) - if self.vc_ver >= 14 and os.path.isfile(self.VCRuntimeRedist): - env['py_vcruntime_redist'] = self.VCRuntimeRedist - return env - - def _build_paths(self, name, spec_path_lists, exists): - """ - Given an environment variable name and specified paths, - return a pathsep-separated string of paths containing - unique, extant, directories from those paths and from - the environment variable. Raise an error if no paths - are resolved. 
- """ - # flatten spec_path_lists - spec_paths = itertools.chain.from_iterable(spec_path_lists) - env_paths = safe_env.get(name, '').split(os.pathsep) - paths = itertools.chain(spec_paths, env_paths) - extant_paths = list(filter(os.path.isdir, paths)) if exists else paths - if not extant_paths: - msg = "%s environment variable is empty" % name.upper() - raise distutils.errors.DistutilsPlatformError(msg) - unique_paths = self._unique_everseen(extant_paths) - return os.pathsep.join(unique_paths) - - # from Python docs - def _unique_everseen(self, iterable, key=None): - """ - List unique elements, preserving order. - Remember all elements ever seen. - - _unique_everseen('AAAABBBCCDAABBB') --> A B C D - - _unique_everseen('ABBCcAD', str.lower) --> A B C D - """ - seen = set() - seen_add = seen.add - if key is None: - for element in filterfalse(seen.__contains__, iterable): - seen_add(element) - yield element - else: - for element in iterable: - k = key(element) - if k not in seen: - seen_add(k) - yield element diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/namespaces.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/namespaces.py deleted file mode 100644 index dc16106..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/namespaces.py +++ /dev/null @@ -1,107 +0,0 @@ -import os -from distutils import log -import itertools - -from setuptools.extern.six.moves import map - - -flatten = itertools.chain.from_iterable - - -class Installer: - - nspkg_ext = '-nspkg.pth' - - def install_namespaces(self): - nsp = self._get_all_ns_packages() - if not nsp: - return - filename, ext = os.path.splitext(self._get_target()) - filename += self.nspkg_ext - self.outputs.append(filename) - log.info("Installing %s", filename) - lines = map(self._gen_nspkg_line, nsp) - - if self.dry_run: - # always generate the lines, even in dry run - list(lines) - return - - with open(filename, 'wt') as f: - f.writelines(lines) - - def uninstall_namespaces(self): - filename, ext = os.path.splitext(self._get_target()) - filename += self.nspkg_ext - if not os.path.exists(filename): - return - log.info("Removing %s", filename) - os.remove(filename) - - def _get_target(self): - return self.target - - _nspkg_tmpl = ( - "import sys, types, os", - "has_mfs = sys.version_info > (3, 5)", - "p = os.path.join(%(root)s, *%(pth)r)", - "importlib = has_mfs and __import__('importlib.util')", - "has_mfs and __import__('importlib.machinery')", - "m = has_mfs and " - "sys.modules.setdefault(%(pkg)r, " - "importlib.util.module_from_spec(" - "importlib.machinery.PathFinder.find_spec(%(pkg)r, " - "[os.path.dirname(p)])))", - "m = m or " - "sys.modules.setdefault(%(pkg)r, types.ModuleType(%(pkg)r))", - "mp = (m or []) and m.__dict__.setdefault('__path__',[])", - "(p not in mp) and mp.append(p)", - ) - "lines for the namespace installer" - - _nspkg_tmpl_multi = ( - 'm and setattr(sys.modules[%(parent)r], %(child)r, m)', - ) - "additional line(s) when a parent package is indicated" - - def _get_root(self): - return "sys._getframe(1).f_locals['sitedir']" - - def _gen_nspkg_line(self, pkg): - # ensure pkg is not a unicode string under Python 2.7 - pkg = str(pkg) - pth = tuple(pkg.split('.')) - root = self._get_root() - tmpl_lines = self._nspkg_tmpl - parent, sep, child = pkg.rpartition('.') - if parent: - tmpl_lines += self._nspkg_tmpl_multi - return ';'.join(tmpl_lines) % locals() + '\n' - - def _get_all_ns_packages(self): - """Return sorted list of all package namespaces""" - pkgs = 
self.distribution.namespace_packages or [] - return sorted(flatten(map(self._pkg_names, pkgs))) - - @staticmethod - def _pkg_names(pkg): - """ - Given a namespace package, yield the components of that - package. - - >>> names = Installer._pkg_names('a.b.c') - >>> set(names) == set(['a', 'a.b', 'a.b.c']) - True - """ - parts = pkg.split('.') - while parts: - yield '.'.join(parts) - parts.pop() - - -class DevelopInstaller(Installer): - def _get_root(self): - return repr(str(self.egg_path)) - - def _get_target(self): - return self.egg_link diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/package_index.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/package_index.py deleted file mode 100644 index 914b5e6..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/package_index.py +++ /dev/null @@ -1,1119 +0,0 @@ -"""PyPI and direct package downloading""" -import sys -import os -import re -import shutil -import socket -import base64 -import hashlib -import itertools -from functools import wraps - -from setuptools.extern import six -from setuptools.extern.six.moves import urllib, http_client, configparser, map - -import setuptools -from pkg_resources import ( - CHECKOUT_DIST, Distribution, BINARY_DIST, normalize_path, SOURCE_DIST, - Environment, find_distributions, safe_name, safe_version, - to_filename, Requirement, DEVELOP_DIST, EGG_DIST, -) -from setuptools import ssl_support -from distutils import log -from distutils.errors import DistutilsError -from fnmatch import translate -from setuptools.py27compat import get_all_headers -from setuptools.py33compat import unescape -from setuptools.wheel import Wheel - -EGG_FRAGMENT = re.compile(r'^egg=([-A-Za-z0-9_.+!]+)$') -HREF = re.compile("""href\\s*=\\s*['"]?([^'"> ]+)""", re.I) -# this is here to fix emacs' cruddy broken syntax highlighting -PYPI_MD5 = re.compile( - '<a href="([^"#]+)">([^<]+)</a>\n\\s+\\(<a (?:title="MD5 hash"\n\\s+)' - 'href="[^?]+\\?:action=show_md5&digest=([0-9a-f]{32})">md5</a>\\)' -) -URL_SCHEME = re.compile('([-+.a-z0-9]{2,}):', re.I).match -EXTENSIONS = ".tar.gz .tar.bz2 .tar .zip .tgz".split() - -__all__ = [ - 'PackageIndex', 'distros_for_url', 'parse_bdist_wininst', - 'interpret_distro_name', -] - -_SOCKET_TIMEOUT = 15 - -_tmpl = "setuptools/{setuptools.__version__} Python-urllib/{py_major}" -user_agent = _tmpl.format(py_major=sys.version[:3], setuptools=setuptools) - - -def parse_requirement_arg(spec): - try: - return Requirement.parse(spec) - except ValueError: - raise DistutilsError( - "Not a URL, existing file, or requirement spec: %r" % (spec,) - ) - - -def parse_bdist_wininst(name): - """Return (base,pyversion) or (None,None) for possible .exe name""" - - lower = name.lower() - base, py_ver, plat = None, None, None - - if lower.endswith('.exe'): - if lower.endswith('.win32.exe'): - base = name[:-10] - plat = 'win32' - elif lower.startswith('.win32-py', -16): - py_ver = name[-7:-4] - base = name[:-16] - plat = 'win32' - elif lower.endswith('.win-amd64.exe'): - base = name[:-14] - plat = 'win-amd64' - elif lower.startswith('.win-amd64-py', -20): - py_ver = name[-7:-4] - base = name[:-20] - plat = 'win-amd64' - return base, py_ver, plat - - -def egg_info_for_url(url): - parts = urllib.parse.urlparse(url) - scheme, server, path, parameters, query, fragment = parts - base = urllib.parse.unquote(path.split('/')[-1]) - if server == 'sourceforge.net' and base == 'download': # XXX Yuck - base = urllib.parse.unquote(path.split('/')[-2]) - if '#' in base: - base, fragment = 
base.split('#', 1) - return base, fragment - - -def distros_for_url(url, metadata=None): - """Yield egg or source distribution objects that might be found at a URL""" - base, fragment = egg_info_for_url(url) - for dist in distros_for_location(url, base, metadata): - yield dist - if fragment: - match = EGG_FRAGMENT.match(fragment) - if match: - for dist in interpret_distro_name( - url, match.group(1), metadata, precedence=CHECKOUT_DIST - ): - yield dist - - -def distros_for_location(location, basename, metadata=None): - """Yield egg or source distribution objects based on basename""" - if basename.endswith('.egg.zip'): - basename = basename[:-4] # strip the .zip - if basename.endswith('.egg') and '-' in basename: - # only one, unambiguous interpretation - return [Distribution.from_location(location, basename, metadata)] - if basename.endswith('.whl') and '-' in basename: - wheel = Wheel(basename) - if not wheel.is_compatible(): - return [] - return [Distribution( - location=location, - project_name=wheel.project_name, - version=wheel.version, - # Increase priority over eggs. - precedence=EGG_DIST + 1, - )] - if basename.endswith('.exe'): - win_base, py_ver, platform = parse_bdist_wininst(basename) - if win_base is not None: - return interpret_distro_name( - location, win_base, metadata, py_ver, BINARY_DIST, platform - ) - # Try source distro extensions (.zip, .tgz, etc.) - # - for ext in EXTENSIONS: - if basename.endswith(ext): - basename = basename[:-len(ext)] - return interpret_distro_name(location, basename, metadata) - return [] # no extension matched - - -def distros_for_filename(filename, metadata=None): - """Yield possible egg or source distribution objects based on a filename""" - return distros_for_location( - normalize_path(filename), os.path.basename(filename), metadata - ) - - -def interpret_distro_name( - location, basename, metadata, py_version=None, precedence=SOURCE_DIST, - platform=None -): - """Generate alternative interpretations of a source distro name - - Note: if `location` is a filesystem filename, you should call - ``pkg_resources.normalize_path()`` on it before passing it to this - routine! - """ - # Generate alternative interpretations of a source distro name - # Because some packages are ambiguous as to name/versions split - # e.g. "adns-python-1.1.0", "egenix-mx-commercial", etc. - # So, we generate each possible interepretation (e.g. "adns, python-1.1.0" - # "adns-python, 1.1.0", and "adns-python-1.1.0, no version"). In practice, - # the spurious interpretations should be ignored, because in the event - # there's also an "adns" package, the spurious "python-1.1.0" version will - # compare lower than any numeric version number, and is therefore unlikely - # to match a request for it. It's still a potential problem, though, and - # in the long run PyPI and the distutils should go for "safe" names and - # versions in distribution archive names (sdist and bdist). - - parts = basename.split('-') - if not py_version and any(re.match(r'py\d\.\d$', p) for p in parts[2:]): - # it is a bdist_dumb, not an sdist -- bail out - return - - for p in range(1, len(parts) + 1): - yield Distribution( - location, metadata, '-'.join(parts[:p]), '-'.join(parts[p:]), - py_version=py_version, precedence=precedence, - platform=platform - ) - - -# From Python 2.7 docs -def unique_everseen(iterable, key=None): - "List unique elements, preserving order. Remember all elements ever seen." 
- # unique_everseen('AAAABBBCCDAABBB') --> A B C D - # unique_everseen('ABBCcAD', str.lower) --> A B C D - seen = set() - seen_add = seen.add - if key is None: - for element in six.moves.filterfalse(seen.__contains__, iterable): - seen_add(element) - yield element - else: - for element in iterable: - k = key(element) - if k not in seen: - seen_add(k) - yield element - - -def unique_values(func): - """ - Wrap a function returning an iterable such that the resulting iterable - only ever yields unique items. - """ - - @wraps(func) - def wrapper(*args, **kwargs): - return unique_everseen(func(*args, **kwargs)) - - return wrapper - - -REL = re.compile(r"""<([^>]*\srel\s*=\s*['"]?([^'">]+)[^>]*)>""", re.I) -# this line is here to fix emacs' cruddy broken syntax highlighting - - -@unique_values -def find_external_links(url, page): - """Find rel="homepage" and rel="download" links in `page`, yielding URLs""" - - for match in REL.finditer(page): - tag, rel = match.groups() - rels = set(map(str.strip, rel.lower().split(','))) - if 'homepage' in rels or 'download' in rels: - for match in HREF.finditer(tag): - yield urllib.parse.urljoin(url, htmldecode(match.group(1))) - - for tag in ("<th>Home Page", "<th>Download URL"): - pos = page.find(tag) - if pos != -1: - match = HREF.search(page, pos) - if match: - yield urllib.parse.urljoin(url, htmldecode(match.group(1))) - - -class ContentChecker(object): - """ - A null content checker that defines the interface for checking content - """ - - def feed(self, block): - """ - Feed a block of data to the hash. - """ - return - - def is_valid(self): - """ - Check the hash. Return False if validation fails. - """ - return True - - def report(self, reporter, template): - """ - Call reporter with information about the checker (hash name) - substituted into the template. 
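# [Editor's note: illustrative sketch, not part of the deleted file.]
# For the null ContentChecker above, feed() and report() are no-ops and
# is_valid() always passes, so downloads without a hash fragment are
# accepted as-is:
#
#     >>> checker = ContentChecker()
#     >>> checker.feed(b'anything')
#     >>> checker.is_valid()
#     True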
- """ - return - - -class HashChecker(ContentChecker): - pattern = re.compile( - r'(?P<hash_name>sha1|sha224|sha384|sha256|sha512|md5)=' - r'(?P<expected>[a-f0-9]+)' - ) - - def __init__(self, hash_name, expected): - self.hash_name = hash_name - self.hash = hashlib.new(hash_name) - self.expected = expected - - @classmethod - def from_url(cls, url): - "Construct a (possibly null) ContentChecker from a URL" - fragment = urllib.parse.urlparse(url)[-1] - if not fragment: - return ContentChecker() - match = cls.pattern.search(fragment) - if not match: - return ContentChecker() - return cls(**match.groupdict()) - - def feed(self, block): - self.hash.update(block) - - def is_valid(self): - return self.hash.hexdigest() == self.expected - - def report(self, reporter, template): - msg = template % self.hash_name - return reporter(msg) - - -class PackageIndex(Environment): - """A distribution index that scans web pages for download URLs""" - - def __init__( - self, index_url="https://pypi.python.org/simple", hosts=('*',), - ca_bundle=None, verify_ssl=True, *args, **kw - ): - Environment.__init__(self, *args, **kw) - self.index_url = index_url + "/" [:not index_url.endswith('/')] - self.scanned_urls = {} - self.fetched_urls = {} - self.package_pages = {} - self.allows = re.compile('|'.join(map(translate, hosts))).match - self.to_scan = [] - use_ssl = ( - verify_ssl - and ssl_support.is_available - and (ca_bundle or ssl_support.find_ca_bundle()) - ) - if use_ssl: - self.opener = ssl_support.opener_for(ca_bundle) - else: - self.opener = urllib.request.urlopen - - def process_url(self, url, retrieve=False): - """Evaluate a URL as a possible download, and maybe retrieve it""" - if url in self.scanned_urls and not retrieve: - return - self.scanned_urls[url] = True - if not URL_SCHEME(url): - self.process_filename(url) - return - else: - dists = list(distros_for_url(url)) - if dists: - if not self.url_ok(url): - return - self.debug("Found link: %s", url) - - if dists or not retrieve or url in self.fetched_urls: - list(map(self.add, dists)) - return # don't need the actual page - - if not self.url_ok(url): - self.fetched_urls[url] = True - return - - self.info("Reading %s", url) - self.fetched_urls[url] = True # prevent multiple fetch attempts - tmpl = "Download error on %s: %%s -- Some packages may not be found!" - f = self.open_url(url, tmpl % url) - if f is None: - return - self.fetched_urls[f.url] = True - if 'html' not in f.headers.get('content-type', '').lower(): - f.close() # not html, we can't process it - return - - base = f.url # handle redirects - page = f.read() - if not isinstance(page, str): - # In Python 3 and got bytes but want str. 
- if isinstance(f, urllib.error.HTTPError): - # Errors have no charset, assume latin1: - charset = 'latin-1' - else: - charset = f.headers.get_param('charset') or 'latin-1' - page = page.decode(charset, "ignore") - f.close() - for match in HREF.finditer(page): - link = urllib.parse.urljoin(base, htmldecode(match.group(1))) - self.process_url(link) - if url.startswith(self.index_url) and getattr(f, 'code', None) != 404: - page = self.process_index(url, page) - - def process_filename(self, fn, nested=False): - # process filenames or directories - if not os.path.exists(fn): - self.warn("Not found: %s", fn) - return - - if os.path.isdir(fn) and not nested: - path = os.path.realpath(fn) - for item in os.listdir(path): - self.process_filename(os.path.join(path, item), True) - - dists = distros_for_filename(fn) - if dists: - self.debug("Found: %s", fn) - list(map(self.add, dists)) - - def url_ok(self, url, fatal=False): - s = URL_SCHEME(url) - is_file = s and s.group(1).lower() == 'file' - if is_file or self.allows(urllib.parse.urlparse(url)[1]): - return True - msg = ( - "\nNote: Bypassing %s (disallowed host; see " - "http://bit.ly/2hrImnY for details).\n") - if fatal: - raise DistutilsError(msg % url) - else: - self.warn(msg, url) - - def scan_egg_links(self, search_path): - dirs = filter(os.path.isdir, search_path) - egg_links = ( - (path, entry) - for path in dirs - for entry in os.listdir(path) - if entry.endswith('.egg-link') - ) - list(itertools.starmap(self.scan_egg_link, egg_links)) - - def scan_egg_link(self, path, entry): - with open(os.path.join(path, entry)) as raw_lines: - # filter non-empty lines - lines = list(filter(None, map(str.strip, raw_lines))) - - if len(lines) != 2: - # format is not recognized; punt - return - - egg_path, setup_path = lines - - for dist in find_distributions(os.path.join(path, egg_path)): - dist.location = os.path.join(path, *lines) - dist.precedence = SOURCE_DIST - self.add(dist) - - def process_index(self, url, page): - """Process the contents of a PyPI page""" - - def scan(link): - # Process a URL to see if it's for a package page - if link.startswith(self.index_url): - parts = list(map( - urllib.parse.unquote, link[len(self.index_url):].split('/') - )) - if len(parts) == 2 and '#' not in parts[1]: - # it's a package page, sanitize and index it - pkg = safe_name(parts[0]) - ver = safe_version(parts[1]) - self.package_pages.setdefault(pkg.lower(), {})[link] = True - return to_filename(pkg), to_filename(ver) - return None, None - - # process an index page into the package-page index - for match in HREF.finditer(page): - try: - scan(urllib.parse.urljoin(url, htmldecode(match.group(1)))) - except ValueError: - pass - - pkg, ver = scan(url) # ensure this page is in the page index - if pkg: - # process individual package page - for new_url in find_external_links(url, page): - # Process the found URL - base, frag = egg_info_for_url(new_url) - if base.endswith('.py') and not frag: - if ver: - new_url += '#egg=%s-%s' % (pkg, ver) - else: - self.need_version_info(url) - self.scan_url(new_url) - - return PYPI_MD5.sub( - lambda m: '<a href="%s#md5=%s">%s</a>' % m.group(1, 3, 2), page - ) - else: - return "" # no sense double-scanning non-package pages - - def need_version_info(self, url): - self.scan_all( - "Page at %s links to .py file(s) without version info; an index " - "scan is required.", url - ) - - def scan_all(self, msg=None, *args): - if self.index_url not in self.fetched_urls: - if msg: - self.warn(msg, *args) - self.info( - "Scanning index of all 
packages (this may take a while)" - ) - self.scan_url(self.index_url) - - def find_packages(self, requirement): - self.scan_url(self.index_url + requirement.unsafe_name + '/') - - if not self.package_pages.get(requirement.key): - # Fall back to safe version of the name - self.scan_url(self.index_url + requirement.project_name + '/') - - if not self.package_pages.get(requirement.key): - # We couldn't find the target package, so search the index page too - self.not_found_in_index(requirement) - - for url in list(self.package_pages.get(requirement.key, ())): - # scan each page that might be related to the desired package - self.scan_url(url) - - def obtain(self, requirement, installer=None): - self.prescan() - self.find_packages(requirement) - for dist in self[requirement.key]: - if dist in requirement: - return dist - self.debug("%s does not match %s", requirement, dist) - return super(PackageIndex, self).obtain(requirement, installer) - - def check_hash(self, checker, filename, tfp): - """ - checker is a ContentChecker - """ - checker.report( - self.debug, - "Validating %%s checksum for %s" % filename) - if not checker.is_valid(): - tfp.close() - os.unlink(filename) - raise DistutilsError( - "%s validation failed for %s; " - "possible download problem?" - % (checker.hash.name, os.path.basename(filename)) - ) - - def add_find_links(self, urls): - """Add `urls` to the list that will be prescanned for searches""" - for url in urls: - if ( - self.to_scan is None # if we have already "gone online" - or not URL_SCHEME(url) # or it's a local file/directory - or url.startswith('file:') - or list(distros_for_url(url)) # or a direct package link - ): - # then go ahead and process it now - self.scan_url(url) - else: - # otherwise, defer retrieval till later - self.to_scan.append(url) - - def prescan(self): - """Scan urls scheduled for prescanning (e.g. --find-links)""" - if self.to_scan: - list(map(self.scan_url, self.to_scan)) - self.to_scan = None # from now on, go ahead and process immediately - - def not_found_in_index(self, requirement): - if self[requirement.key]: # we've seen at least one distro - meth, msg = self.info, "Couldn't retrieve index page for %r" - else: # no distros seen for this name, might be misspelled - meth, msg = ( - self.warn, - "Couldn't find index page for %r (maybe misspelled?)") - meth(msg, requirement.unsafe_name) - self.scan_all() - - def download(self, spec, tmpdir): - """Locate and/or download `spec` to `tmpdir`, returning a local path - - `spec` may be a ``Requirement`` object, or a string containing a URL, - an existing local filename, or a project/version requirement spec - (i.e. the string form of a ``Requirement`` object). If it is the URL - of a .py file with an unambiguous ``#egg=name-version`` tag (i.e., one - that escapes ``-`` as ``_`` throughout), a trivial ``setup.py`` is - automatically created alongside the downloaded file. - - If `spec` is a ``Requirement`` object or a string containing a - project/version requirement spec, this method returns the location of - a matching distribution (possibly after downloading it to `tmpdir`). - If `spec` is a locally existing file or directory name, it is simply - returned unchanged. If `spec` is a URL, it is downloaded to a subpath - of `tmpdir`, and the local filename is returned. Various errors may be - raised if a problem occurs during downloading. 
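# [Editor's note: illustrative sketch; the index instance, requirement
# string, and tmpdir below are hypothetical.] download() accepts a URL,
# a local path, or a requirement spec and returns a local filename:
#
#     >>> index = PackageIndex()                       # doctest: +SKIP
#     >>> index.download('example_pkg==1.0', '/tmp')   # doctest: +SKIP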
- """ - if not isinstance(spec, Requirement): - scheme = URL_SCHEME(spec) - if scheme: - # It's a url, download it to tmpdir - found = self._download_url(scheme.group(1), spec, tmpdir) - base, fragment = egg_info_for_url(spec) - if base.endswith('.py'): - found = self.gen_setup(found, fragment, tmpdir) - return found - elif os.path.exists(spec): - # Existing file or directory, just return it - return spec - else: - spec = parse_requirement_arg(spec) - return getattr(self.fetch_distribution(spec, tmpdir), 'location', None) - - def fetch_distribution( - self, requirement, tmpdir, force_scan=False, source=False, - develop_ok=False, local_index=None): - """Obtain a distribution suitable for fulfilling `requirement` - - `requirement` must be a ``pkg_resources.Requirement`` instance. - If necessary, or if the `force_scan` flag is set, the requirement is - searched for in the (online) package index as well as the locally - installed packages. If a distribution matching `requirement` is found, - the returned distribution's ``location`` is the value you would have - gotten from calling the ``download()`` method with the matching - distribution's URL or filename. If no matching distribution is found, - ``None`` is returned. - - If the `source` flag is set, only source distributions and source - checkout links will be considered. Unless the `develop_ok` flag is - set, development and system eggs (i.e., those using the ``.egg-info`` - format) will be ignored. - """ - # process a Requirement - self.info("Searching for %s", requirement) - skipped = {} - dist = None - - def find(req, env=None): - if env is None: - env = self - # Find a matching distribution; may be called more than once - - for dist in env[req.key]: - - if dist.precedence == DEVELOP_DIST and not develop_ok: - if dist not in skipped: - self.warn( - "Skipping development or system egg: %s", dist, - ) - skipped[dist] = 1 - continue - - test = ( - dist in req - and (dist.precedence <= SOURCE_DIST or not source) - ) - if test: - loc = self.download(dist.location, tmpdir) - dist.download_location = loc - if os.path.exists(dist.download_location): - return dist - - if force_scan: - self.prescan() - self.find_packages(requirement) - dist = find(requirement) - - if not dist and local_index is not None: - dist = find(requirement, local_index) - - if dist is None: - if self.to_scan is not None: - self.prescan() - dist = find(requirement) - - if dist is None and not force_scan: - self.find_packages(requirement) - dist = find(requirement) - - if dist is None: - self.warn( - "No local packages or working download links found for %s%s", - (source and "a source distribution of " or ""), - requirement, - ) - else: - self.info("Best match: %s", dist) - return dist.clone(location=dist.download_location) - - def fetch(self, requirement, tmpdir, force_scan=False, source=False): - """Obtain a file suitable for fulfilling `requirement` - - DEPRECATED; use the ``fetch_distribution()`` method now instead. For - backward compatibility, this routine is identical but returns the - ``location`` of the downloaded distribution instead of a distribution - object. 
- """ - dist = self.fetch_distribution(requirement, tmpdir, force_scan, source) - if dist is not None: - return dist.location - return None - - def gen_setup(self, filename, fragment, tmpdir): - match = EGG_FRAGMENT.match(fragment) - dists = match and [ - d for d in - interpret_distro_name(filename, match.group(1), None) if d.version - ] or [] - - if len(dists) == 1: # unambiguous ``#egg`` fragment - basename = os.path.basename(filename) - - # Make sure the file has been downloaded to the temp dir. - if os.path.dirname(filename) != tmpdir: - dst = os.path.join(tmpdir, basename) - from setuptools.command.easy_install import samefile - if not samefile(filename, dst): - shutil.copy2(filename, dst) - filename = dst - - with open(os.path.join(tmpdir, 'setup.py'), 'w') as file: - file.write( - "from setuptools import setup\n" - "setup(name=%r, version=%r, py_modules=[%r])\n" - % ( - dists[0].project_name, dists[0].version, - os.path.splitext(basename)[0] - ) - ) - return filename - - elif match: - raise DistutilsError( - "Can't unambiguously interpret project/version identifier %r; " - "any dashes in the name or version should be escaped using " - "underscores. %r" % (fragment, dists) - ) - else: - raise DistutilsError( - "Can't process plain .py files without an '#egg=name-version'" - " suffix to enable automatic setup script generation." - ) - - dl_blocksize = 8192 - - def _download_to(self, url, filename): - self.info("Downloading %s", url) - # Download the file - fp = None - try: - checker = HashChecker.from_url(url) - fp = self.open_url(url) - if isinstance(fp, urllib.error.HTTPError): - raise DistutilsError( - "Can't download %s: %s %s" % (url, fp.code, fp.msg) - ) - headers = fp.info() - blocknum = 0 - bs = self.dl_blocksize - size = -1 - if "content-length" in headers: - # Some servers return multiple Content-Length headers :( - sizes = get_all_headers(headers, 'Content-Length') - size = max(map(int, sizes)) - self.reporthook(url, filename, blocknum, bs, size) - with open(filename, 'wb') as tfp: - while True: - block = fp.read(bs) - if block: - checker.feed(block) - tfp.write(block) - blocknum += 1 - self.reporthook(url, filename, blocknum, bs, size) - else: - break - self.check_hash(checker, filename, tfp) - return headers - finally: - if fp: - fp.close() - - def reporthook(self, url, filename, blocknum, blksize, size): - pass # no-op - - def open_url(self, url, warning=None): - if url.startswith('file:'): - return local_open(url) - try: - return open_with_auth(url, self.opener) - except (ValueError, http_client.InvalidURL) as v: - msg = ' '.join([str(arg) for arg in v.args]) - if warning: - self.warn(warning, msg) - else: - raise DistutilsError('%s %s' % (url, msg)) - except urllib.error.HTTPError as v: - return v - except urllib.error.URLError as v: - if warning: - self.warn(warning, v.reason) - else: - raise DistutilsError("Download error for %s: %s" - % (url, v.reason)) - except http_client.BadStatusLine as v: - if warning: - self.warn(warning, v.line) - else: - raise DistutilsError( - '%s returned a bad status line. The server might be ' - 'down, %s' % - (url, v.line) - ) - except (http_client.HTTPException, socket.error) as v: - if warning: - self.warn(warning, v) - else: - raise DistutilsError("Download error for %s: %s" - % (url, v)) - - def _download_url(self, scheme, url, tmpdir): - # Determine download filename - # - name, fragment = egg_info_for_url(url) - if name: - while '..' 
in name: - name = name.replace('..', '.').replace('\\', '_') - else: - name = "__downloaded__" # default if URL has no path contents - - if name.endswith('.egg.zip'): - name = name[:-4] # strip the extra .zip before download - - filename = os.path.join(tmpdir, name) - - # Download the file - # - if scheme == 'svn' or scheme.startswith('svn+'): - return self._download_svn(url, filename) - elif scheme == 'git' or scheme.startswith('git+'): - return self._download_git(url, filename) - elif scheme.startswith('hg+'): - return self._download_hg(url, filename) - elif scheme == 'file': - return urllib.request.url2pathname(urllib.parse.urlparse(url)[2]) - else: - self.url_ok(url, True) # raises error if not allowed - return self._attempt_download(url, filename) - - def scan_url(self, url): - self.process_url(url, True) - - def _attempt_download(self, url, filename): - headers = self._download_to(url, filename) - if 'html' in headers.get('content-type', '').lower(): - return self._download_html(url, headers, filename) - else: - return filename - - def _download_html(self, url, headers, filename): - file = open(filename) - for line in file: - if line.strip(): - # Check for a subversion index page - if re.search(r'<title>([^- ]+ - )?Revision \d+:', line): - # it's a subversion index page: - file.close() - os.unlink(filename) - return self._download_svn(url, filename) - break # not an index page - file.close() - os.unlink(filename) - raise DistutilsError("Unexpected HTML page found at " + url) - - def _download_svn(self, url, filename): - url = url.split('#', 1)[0] # remove any fragment for svn's sake - creds = '' - if url.lower().startswith('svn:') and '@' in url: - scheme, netloc, path, p, q, f = urllib.parse.urlparse(url) - if not netloc and path.startswith('//') and '/' in path[2:]: - netloc, path = path[2:].split('/', 1) - auth, host = urllib.parse.splituser(netloc) - if auth: - if ':' in auth: - user, pw = auth.split(':', 1) - creds = " --username=%s --password=%s" % (user, pw) - else: - creds = " --username=" + auth - netloc = host - parts = scheme, netloc, url, p, q, f - url = urllib.parse.urlunparse(parts) - self.info("Doing subversion checkout from %s to %s", url, filename) - os.system("svn checkout%s -q %s %s" % (creds, url, filename)) - return filename - - @staticmethod - def _vcs_split_rev_from_url(url, pop_prefix=False): - scheme, netloc, path, query, frag = urllib.parse.urlsplit(url) - - scheme = scheme.split('+', 1)[-1] - - # Some fragment identification fails - path = path.split('#', 1)[0] - - rev = None - if '@' in path: - path, rev = path.rsplit('@', 1) - - # Also, discard fragment - url = urllib.parse.urlunsplit((scheme, netloc, path, query, '')) - - return url, rev - - def _download_git(self, url, filename): - filename = filename.split('#', 1)[0] - url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) - - self.info("Doing git clone from %s to %s", url, filename) - os.system("git clone --quiet %s %s" % (url, filename)) - - if rev is not None: - self.info("Checking out %s", rev) - os.system("(cd %s && git checkout --quiet %s)" % ( - filename, - rev, - )) - - return filename - - def _download_hg(self, url, filename): - filename = filename.split('#', 1)[0] - url, rev = self._vcs_split_rev_from_url(url, pop_prefix=True) - - self.info("Doing hg clone from %s to %s", url, filename) - os.system("hg clone --quiet %s %s" % (url, filename)) - - if rev is not None: - self.info("Updating to %s", rev) - os.system("(cd %s && hg up -C -r %s -q)" % ( - filename, - rev, - )) - - return 
filename - - def debug(self, msg, *args): - log.debug(msg, *args) - - def info(self, msg, *args): - log.info(msg, *args) - - def warn(self, msg, *args): - log.warn(msg, *args) - - -# This pattern matches a character entity reference (a decimal numeric -# reference, a hexadecimal numeric reference, or a named reference). -entity_sub = re.compile(r'&(#(\d+|x[\da-fA-F]+)|[\w.:-]+);?').sub - - -def decode_entity(match): - what = match.group(1) - return unescape(what) - - -def htmldecode(text): - """Decode HTML entities in the given text.""" - return entity_sub(decode_entity, text) - - -def socket_timeout(timeout=15): - def _socket_timeout(func): - def _socket_timeout(*args, **kwargs): - old_timeout = socket.getdefaulttimeout() - socket.setdefaulttimeout(timeout) - try: - return func(*args, **kwargs) - finally: - socket.setdefaulttimeout(old_timeout) - - return _socket_timeout - - return _socket_timeout - - -def _encode_auth(auth): - """ - A function compatible with Python 2.3-3.3 that will encode - auth from a URL suitable for an HTTP header. - >>> str(_encode_auth('username%3Apassword')) - 'dXNlcm5hbWU6cGFzc3dvcmQ=' - - Long auth strings should not cause a newline to be inserted. - >>> long_auth = 'username:' + 'password'*10 - >>> chr(10) in str(_encode_auth(long_auth)) - False - """ - auth_s = urllib.parse.unquote(auth) - # convert to bytes - auth_bytes = auth_s.encode() - # use the legacy interface for Python 2.3 support - encoded_bytes = base64.encodestring(auth_bytes) - # convert back to a string - encoded = encoded_bytes.decode() - # strip the trailing carriage return - return encoded.replace('\n', '') - - -class Credential(object): - """ - A username/password pair. Use like a namedtuple. - """ - - def __init__(self, username, password): - self.username = username - self.password = password - - def __iter__(self): - yield self.username - yield self.password - - def __str__(self): - return '%(username)s:%(password)s' % vars(self) - - -class PyPIConfig(configparser.RawConfigParser): - def __init__(self): - """ - Load from ~/.pypirc - """ - defaults = dict.fromkeys(['username', 'password', 'repository'], '') - configparser.RawConfigParser.__init__(self, defaults) - - rc = os.path.join(os.path.expanduser('~'), '.pypirc') - if os.path.exists(rc): - self.read(rc) - - @property - def creds_by_repository(self): - sections_with_repositories = [ - section for section in self.sections() - if self.get(section, 'repository').strip() - ] - - return dict(map(self._get_repo_cred, sections_with_repositories)) - - def _get_repo_cred(self, section): - repo = self.get(section, 'repository').strip() - return repo, Credential( - self.get(section, 'username').strip(), - self.get(section, 'password').strip(), - ) - - def find_credential(self, url): - """ - If the URL indicated appears to be a repository defined in this - config, return the credential for that repository. - """ - for repository, cred in self.creds_by_repository.items(): - if url.startswith(repository): - return cred - - -def open_with_auth(url, opener=urllib.request.urlopen): - """Open a urllib2 request, handling HTTP authentication""" - - scheme, netloc, path, params, query, frag = urllib.parse.urlparse(url) - - # Double scheme does not raise on Mac OS X as revealed by a - # failing test. We would expect "nonnumeric port". Refs #20.
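# [Editor's note: illustrative, not part of the deleted file.] The guard
# below catches URLs such as 'http://example.com:/path', where
#
#     >>> urllib.parse.urlparse('http://example.com:/path').netloc
#     'example.com:'
#
# i.e. an empty, nonnumeric port that http.client would otherwise reject
# later with a confusing error.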
if netloc.endswith(':'): - raise http_client.InvalidURL("nonnumeric port: ''") - - if scheme in ('http', 'https'): - auth, host = urllib.parse.splituser(netloc) - else: - auth = None - - if not auth: - cred = PyPIConfig().find_credential(url) - if cred: - auth = str(cred) - info = cred.username, url - log.info('Authenticating as %s for %s (from .pypirc)', *info) - - if auth: - auth = "Basic " + _encode_auth(auth) - parts = scheme, host, path, params, query, frag - new_url = urllib.parse.urlunparse(parts) - request = urllib.request.Request(new_url) - request.add_header("Authorization", auth) - else: - request = urllib.request.Request(url) - - request.add_header('User-Agent', user_agent) - fp = opener(request) - - if auth: - # Put authentication info back into request URL if same host, - # so that links found on the page will work - s2, h2, path2, param2, query2, frag2 = urllib.parse.urlparse(fp.url) - if s2 == scheme and h2 == host: - parts = s2, netloc, path2, param2, query2, frag2 - fp.url = urllib.parse.urlunparse(parts) - - return fp - - -# adding a timeout to avoid freezing package_index -open_with_auth = socket_timeout(_SOCKET_TIMEOUT)(open_with_auth) - - -def fix_sf_url(url): - return url # backward compatibility - - -def local_open(url): - """Read a local path, with special support for directories""" - scheme, server, path, param, query, frag = urllib.parse.urlparse(url) - filename = urllib.request.url2pathname(path) - if os.path.isfile(filename): - return urllib.request.urlopen(url) - elif path.endswith('/') and os.path.isdir(filename): - files = [] - for f in os.listdir(filename): - filepath = os.path.join(filename, f) - if f == 'index.html': - with open(filepath, 'r') as fp: - body = fp.read() - break - elif os.path.isdir(filepath): - f += '/' - files.append('<a href="{name}">{name}</a>'.format(name=f)) - else: - tmpl = ( - "<html><head><title>{url}</title>" - "</head><body>{files}</body></html>") - body = tmpl.format(url=url, files='\n'.join(files)) - status, message = 200, "OK" - else: - status, message, body = 404, "Path not found", "Not found" - - headers = {'content-type': 'text/html'} - body_stream = six.StringIO(body) - return urllib.error.HTTPError(url, status, message, headers, body_stream) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/pep425tags.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/pep425tags.py deleted file mode 100644 index dfe55d5..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/pep425tags.py +++ /dev/null @@ -1,316 +0,0 @@ -# This file originally from pip: -# https://github.com/pypa/pip/blob/8f4f15a5a95d7d5b511ceaee9ed261176c181970/src/pip/_internal/pep425tags.py -"""Generate and work with PEP 425 Compatibility Tags.""" -from __future__ import absolute_import - -import distutils.util -import platform -import re -import sys -import sysconfig -import warnings -from collections import OrderedDict - -from .
import glibc - -_osx_arch_pat = re.compile(r'(.+)_(\d+)_(\d+)_(.+)') - - -def get_config_var(var): - try: - return sysconfig.get_config_var(var) - except IOError as e: # Issue #1074 - warnings.warn("{}".format(e), RuntimeWarning) - return None - - -def get_abbr_impl(): - """Return abbreviated implementation name.""" - if hasattr(sys, 'pypy_version_info'): - pyimpl = 'pp' - elif sys.platform.startswith('java'): - pyimpl = 'jy' - elif sys.platform == 'cli': - pyimpl = 'ip' - else: - pyimpl = 'cp' - return pyimpl - - -def get_impl_ver(): - """Return implementation version.""" - impl_ver = get_config_var("py_version_nodot") - if not impl_ver or get_abbr_impl() == 'pp': - impl_ver = ''.join(map(str, get_impl_version_info())) - return impl_ver - - -def get_impl_version_info(): - """Return sys.version_info-like tuple for use in decrementing the minor - version.""" - if get_abbr_impl() == 'pp': - # as per https://github.com/pypa/pip/issues/2882 - return (sys.version_info[0], sys.pypy_version_info.major, - sys.pypy_version_info.minor) - else: - return sys.version_info[0], sys.version_info[1] - - -def get_impl_tag(): - """ - Returns the Tag for this specific implementation. - """ - return "{}{}".format(get_abbr_impl(), get_impl_ver()) - - -def get_flag(var, fallback, expected=True, warn=True): - """Use a fallback method for determining SOABI flags if the needed config - var is unset or unavailable.""" - val = get_config_var(var) - if val is None: - if warn: - warnings.warn("Config variable '{0}' is unset, Python ABI tag may " - "be incorrect".format(var), RuntimeWarning, 2) - return fallback() - return val == expected - - -def get_abi_tag(): - """Return the ABI tag based on SOABI (if available) or emulate SOABI - (CPython 2, PyPy).""" - soabi = get_config_var('SOABI') - impl = get_abbr_impl() - if not soabi and impl in {'cp', 'pp'} and hasattr(sys, 'maxunicode'): - d = '' - m = '' - u = '' - if get_flag('Py_DEBUG', - lambda: hasattr(sys, 'gettotalrefcount'), - warn=(impl == 'cp')): - d = 'd' - if get_flag('WITH_PYMALLOC', - lambda: impl == 'cp', - warn=(impl == 'cp')): - m = 'm' - if get_flag('Py_UNICODE_SIZE', - lambda: sys.maxunicode == 0x10ffff, - expected=4, - warn=(impl == 'cp' and - sys.version_info < (3, 3))) \ - and sys.version_info < (3, 3): - u = 'u' - abi = '%s%s%s%s%s' % (impl, get_impl_ver(), d, m, u) - elif soabi and soabi.startswith('cpython-'): - abi = 'cp' + soabi.split('-')[1] - elif soabi: - abi = soabi.replace('.', '_').replace('-', '_') - else: - abi = None - return abi - - -def _is_running_32bit(): - return sys.maxsize == 2147483647 - - -def get_platform(): - """Return our platform name 'win32', 'linux_x86_64'""" - if sys.platform == 'darwin': - # distutils.util.get_platform() returns the release based on the value - # of MACOSX_DEPLOYMENT_TARGET on which Python was built, which may - # be significantly older than the user's current machine. - release, _, machine = platform.mac_ver() - split_ver = release.split('.') - - if machine == "x86_64" and _is_running_32bit(): - machine = "i386" - elif machine == "ppc64" and _is_running_32bit(): - machine = "ppc" - - return 'macosx_{}_{}_{}'.format(split_ver[0], split_ver[1], machine) - - # XXX remove distutils dependency - result = distutils.util.get_platform().replace('.', '_').replace('-', '_') - if result == "linux_x86_64" and _is_running_32bit(): - # 32 bit Python program (running on a 64 bit Linux): pip should only - # install and run 32 bit compiled extensions in that case. 
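# [Editor's note: illustrative, not part of the deleted file.] A 32-bit
# CPython on a 64-bit Linux kernel has sys.maxsize == 2**31 - 1, so the
# distutils platform 'linux_x86_64' is downgraded to the tag below:
#
#     >>> _is_running_32bit() and get_platform()   # doctest: +SKIP
#     'linux_i686'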
- result = "linux_i686" - - return result - - -def is_manylinux1_compatible(): - # Only Linux, and only x86-64 / i686 - if get_platform() not in {"linux_x86_64", "linux_i686"}: - return False - - # Check for presence of _manylinux module - try: - import _manylinux - return bool(_manylinux.manylinux1_compatible) - except (ImportError, AttributeError): - # Fall through to heuristic check below - pass - - # Check glibc version. CentOS 5 uses glibc 2.5. - return glibc.have_compatible_glibc(2, 5) - - -def get_darwin_arches(major, minor, machine): - """Return a list of supported arches (including group arches) for - the given major, minor and machine architecture of an macOS machine. - """ - arches = [] - - def _supports_arch(major, minor, arch): - # Looking at the application support for macOS versions in the chart - # provided by https://en.wikipedia.org/wiki/OS_X#Versions it appears - # our timeline looks roughly like: - # - # 10.0 - Introduces ppc support. - # 10.4 - Introduces ppc64, i386, and x86_64 support, however the ppc64 - # and x86_64 support is CLI only, and cannot be used for GUI - # applications. - # 10.5 - Extends ppc64 and x86_64 support to cover GUI applications. - # 10.6 - Drops support for ppc64 - # 10.7 - Drops support for ppc - # - # Given that we do not know if we're installing a CLI or a GUI - # application, we must be conservative and assume it might be a GUI - # application and behave as if ppc64 and x86_64 support did not occur - # until 10.5. - # - # Note: The above information is taken from the "Application support" - # column in the chart not the "Processor support" since I believe - # that we care about what instruction sets an application can use - # not which processors the OS supports. - if arch == 'ppc': - return (major, minor) <= (10, 5) - if arch == 'ppc64': - return (major, minor) == (10, 5) - if arch == 'i386': - return (major, minor) >= (10, 4) - if arch == 'x86_64': - return (major, minor) >= (10, 5) - if arch in groups: - for garch in groups[arch]: - if _supports_arch(major, minor, garch): - return True - return False - - groups = OrderedDict([ - ("fat", ("i386", "ppc")), - ("intel", ("x86_64", "i386")), - ("fat64", ("x86_64", "ppc64")), - ("fat32", ("x86_64", "i386", "ppc")), - ]) - - if _supports_arch(major, minor, machine): - arches.append(machine) - - for garch in groups: - if machine in groups[garch] and _supports_arch(major, minor, garch): - arches.append(garch) - - arches.append('universal') - - return arches - - -def get_supported(versions=None, noarch=False, platform=None, - impl=None, abi=None): - """Return a list of supported tags for each version specified in - `versions`. - - :param versions: a list of string versions, of the form ["33", "32"], - or None. The first version will be assumed to support our ABI. - :param platform: specify the exact platform you want valid - tags for, or None. If None, use the local system platform. - :param impl: specify the exact implementation you want valid - tags for, or None. If None, use the local interpreter impl. - :param abi: specify the exact abi you want valid - tags for, or None. If None, use the local interpreter abi. - """ - supported = [] - - # Versions must be given with respect to the preference - if versions is None: - versions = [] - version_info = get_impl_version_info() - major = version_info[:-1] - # Support all previous minor Python versions. 
- for minor in range(version_info[-1], -1, -1): - versions.append(''.join(map(str, major + (minor,)))) - - impl = impl or get_abbr_impl() - - abis = [] - - abi = abi or get_abi_tag() - if abi: - abis[0:0] = [abi] - - abi3s = set() - import imp - for suffix in imp.get_suffixes(): - if suffix[0].startswith('.abi'): - abi3s.add(suffix[0].split('.', 2)[1]) - - abis.extend(sorted(list(abi3s))) - - abis.append('none') - - if not noarch: - arch = platform or get_platform() - if arch.startswith('macosx'): - # support macosx-10.6-intel on macosx-10.9-x86_64 - match = _osx_arch_pat.match(arch) - if match: - name, major, minor, actual_arch = match.groups() - tpl = '{}_{}_%i_%s'.format(name, major) - arches = [] - for m in reversed(range(int(minor) + 1)): - for a in get_darwin_arches(int(major), m, actual_arch): - arches.append(tpl % (m, a)) - else: - # arch pattern didn't match (?!) - arches = [arch] - elif platform is None and is_manylinux1_compatible(): - arches = [arch.replace('linux', 'manylinux1'), arch] - else: - arches = [arch] - - # Current version, current API (built specifically for our Python): - for abi in abis: - for arch in arches: - supported.append(('%s%s' % (impl, versions[0]), abi, arch)) - - # abi3 modules compatible with older version of Python - for version in versions[1:]: - # abi3 was introduced in Python 3.2 - if version in {'31', '30'}: - break - for abi in abi3s: # empty set if not Python 3 - for arch in arches: - supported.append(("%s%s" % (impl, version), abi, arch)) - - # Has binaries, does not use the Python API: - for arch in arches: - supported.append(('py%s' % (versions[0][0]), 'none', arch)) - - # No abi / arch, but requires our implementation: - supported.append(('%s%s' % (impl, versions[0]), 'none', 'any')) - # Tagged specifically as being cross-version compatible - # (with just the major version specified) - supported.append(('%s%s' % (impl, versions[0][0]), 'none', 'any')) - - # No abi / arch, generic Python - for i, version in enumerate(versions): - supported.append(('py%s' % (version,), 'none', 'any')) - if i == 0: - supported.append(('py%s' % (version[0]), 'none', 'any')) - - return supported - - -implementation_tag = get_impl_tag() diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/py27compat.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/py27compat.py deleted file mode 100644 index 2985011..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/py27compat.py +++ /dev/null @@ -1,28 +0,0 @@ -""" -Compatibility Support for Python 2.7 and earlier -""" - -import platform - -from setuptools.extern import six - - -def get_all_headers(message, key): - """ - Given an HTTPMessage, return all headers matching a given key. 
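# [Editor's note: illustrative doctest, not part of the deleted file.]
#
#     >>> import email
#     >>> raw = "X-Key: a" + chr(10) + "X-Key: b" + chr(10) + chr(10)
#     >>> get_all_headers(email.message_from_string(raw), 'X-Key')
#     ['a', 'b']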
- """ - return message.get_all(key) - - -if six.PY2: - def get_all_headers(message, key): - return message.getheaders(key) - - -linux_py2_ascii = ( - platform.system() == 'Linux' and - six.PY2 -) - -rmtree_safe = str if linux_py2_ascii else lambda x: x -"""Workaround for http://bugs.python.org/issue24672""" diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/py31compat.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/py31compat.py deleted file mode 100644 index 4ea9532..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/py31compat.py +++ /dev/null @@ -1,41 +0,0 @@ -__all__ = ['get_config_vars', 'get_path'] - -try: - # Python 2.7 or >=3.2 - from sysconfig import get_config_vars, get_path -except ImportError: - from distutils.sysconfig import get_config_vars, get_python_lib - - def get_path(name): - if name not in ('platlib', 'purelib'): - raise ValueError("Name must be purelib or platlib") - return get_python_lib(name == 'platlib') - - -try: - # Python >=3.2 - from tempfile import TemporaryDirectory -except ImportError: - import shutil - import tempfile - - class TemporaryDirectory(object): - """ - Very simple temporary directory context manager. - Will try to delete afterward, but will also ignore OS and similar - errors on deletion. - """ - - def __init__(self): - self.name = None # Handle mkdtemp raising an exception - self.name = tempfile.mkdtemp() - - def __enter__(self): - return self.name - - def __exit__(self, exctype, excvalue, exctrace): - try: - shutil.rmtree(self.name, True) - except OSError: # removal errors are not the only possible - pass - self.name = None diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/py33compat.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/py33compat.py deleted file mode 100644 index 2a73ebb..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/py33compat.py +++ /dev/null @@ -1,54 +0,0 @@ -import dis -import array -import collections - -try: - import html -except ImportError: - html = None - -from setuptools.extern import six -from setuptools.extern.six.moves import html_parser - - -OpArg = collections.namedtuple('OpArg', 'opcode arg') - - -class Bytecode_compat(object): - def __init__(self, code): - self.code = code - - def __iter__(self): - """Yield '(op,arg)' pair for each operation in code object 'code'""" - - bytes = array.array('b', self.code.co_code) - eof = len(self.code.co_code) - - ptr = 0 - extended_arg = 0 - - while ptr < eof: - - op = bytes[ptr] - - if op >= dis.HAVE_ARGUMENT: - - arg = bytes[ptr + 1] + bytes[ptr + 2] * 256 + extended_arg - ptr += 3 - - if op == dis.EXTENDED_ARG: - long_type = six.integer_types[-1] - extended_arg = arg * long_type(65536) - continue - - else: - arg = None - ptr += 1 - - yield OpArg(op, arg) - - -Bytecode = getattr(dis, 'Bytecode', Bytecode_compat) - - -unescape = getattr(html, 'unescape', html_parser.HTMLParser().unescape) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/py36compat.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/py36compat.py deleted file mode 100644 index f527969..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/py36compat.py +++ /dev/null @@ -1,82 +0,0 @@ -import sys -from distutils.errors import DistutilsOptionError -from distutils.util import strtobool -from distutils.debug import DEBUG - - -class Distribution_parse_config_files: - """ - Mix-in providing forward-compatibility for functionality to be - included by default on Python 3.7. 
- - Do not edit the code in this class except to update functionality - as implemented in distutils. - """ - def parse_config_files(self, filenames=None): - from configparser import ConfigParser - - # Ignore install directory options if we have a venv - if sys.prefix != sys.base_prefix: - ignore_options = [ - 'install-base', 'install-platbase', 'install-lib', - 'install-platlib', 'install-purelib', 'install-headers', - 'install-scripts', 'install-data', 'prefix', 'exec-prefix', - 'home', 'user', 'root'] - else: - ignore_options = [] - - ignore_options = frozenset(ignore_options) - - if filenames is None: - filenames = self.find_config_files() - - if DEBUG: - self.announce("Distribution.parse_config_files():") - - parser = ConfigParser(interpolation=None) - for filename in filenames: - if DEBUG: - self.announce(" reading %s" % filename) - parser.read(filename) - for section in parser.sections(): - options = parser.options(section) - opt_dict = self.get_option_dict(section) - - for opt in options: - if opt != '__name__' and opt not in ignore_options: - val = parser.get(section,opt) - opt = opt.replace('-', '_') - opt_dict[opt] = (filename, val) - - # Make the ConfigParser forget everything (so we retain - # the original filenames that options come from) - parser.__init__() - - # If there was a "global" section in the config file, use it - # to set Distribution options. - - if 'global' in self.command_options: - for (opt, (src, val)) in self.command_options['global'].items(): - alias = self.negative_opt.get(opt) - try: - if alias: - setattr(self, alias, not strtobool(val)) - elif opt in ('verbose', 'dry_run'): # ugh! - setattr(self, opt, strtobool(val)) - else: - setattr(self, opt, val) - except ValueError as msg: - raise DistutilsOptionError(msg) - - -if sys.version_info < (3,): - # Python 2 behavior is sufficient - class Distribution_parse_config_files: - pass - - -if False: - # When updated behavior is available upstream, - # disable override here. - class Distribution_parse_config_files: - pass diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/sandbox.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/sandbox.py deleted file mode 100644 index 685f3f7..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/sandbox.py +++ /dev/null @@ -1,491 +0,0 @@ -import os -import sys -import tempfile -import operator -import functools -import itertools -import re -import contextlib -import pickle -import textwrap - -from setuptools.extern import six -from setuptools.extern.six.moves import builtins, map - -import pkg_resources.py31compat - -if sys.platform.startswith('java'): - import org.python.modules.posix.PosixModule as _os -else: - _os = sys.modules[os.name] -try: - _file = file -except NameError: - _file = None -_open = open -from distutils.errors import DistutilsError -from pkg_resources import working_set - - -__all__ = [ - "AbstractSandbox", "DirectorySandbox", "SandboxViolation", "run_setup", -] - - -def _execfile(filename, globals, locals=None): - """ - Python 3 implementation of execfile. 
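# [Editor's note: illustrative sketch; the script path is hypothetical.]
#
#     >>> ns = {'__name__': '__main__'}
#     >>> _execfile('setup.py', ns)   # doctest: +SKIP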
- """ - mode = 'rb' - with open(filename, mode) as stream: - script = stream.read() - if locals is None: - locals = globals - code = compile(script, filename, 'exec') - exec(code, globals, locals) - - -@contextlib.contextmanager -def save_argv(repl=None): - saved = sys.argv[:] - if repl is not None: - sys.argv[:] = repl - try: - yield saved - finally: - sys.argv[:] = saved - - -@contextlib.contextmanager -def save_path(): - saved = sys.path[:] - try: - yield saved - finally: - sys.path[:] = saved - - -@contextlib.contextmanager -def override_temp(replacement): - """ - Monkey-patch tempfile.tempdir with replacement, ensuring it exists - """ - pkg_resources.py31compat.makedirs(replacement, exist_ok=True) - - saved = tempfile.tempdir - - tempfile.tempdir = replacement - - try: - yield - finally: - tempfile.tempdir = saved - - -@contextlib.contextmanager -def pushd(target): - saved = os.getcwd() - os.chdir(target) - try: - yield saved - finally: - os.chdir(saved) - - -class UnpickleableException(Exception): - """ - An exception representing another Exception that could not be pickled. - """ - - @staticmethod - def dump(type, exc): - """ - Always return a dumped (pickled) type and exc. If exc can't be pickled, - wrap it in UnpickleableException first. - """ - try: - return pickle.dumps(type), pickle.dumps(exc) - except Exception: - # get UnpickleableException inside the sandbox - from setuptools.sandbox import UnpickleableException as cls - return cls.dump(cls, cls(repr(exc))) - - -class ExceptionSaver: - """ - A Context Manager that will save an exception, serialized, and restore it - later. - """ - - def __enter__(self): - return self - - def __exit__(self, type, exc, tb): - if not exc: - return - - # dump the exception - self._saved = UnpickleableException.dump(type, exc) - self._tb = tb - - # suppress the exception - return True - - def resume(self): - "restore and re-raise any exception" - - if '_saved' not in vars(self): - return - - type, exc = map(pickle.loads, self._saved) - six.reraise(type, exc, self._tb) - - -@contextlib.contextmanager -def save_modules(): - """ - Context in which imported modules are saved. - - Translates exceptions internal to the context into the equivalent exception - outside the context. - """ - saved = sys.modules.copy() - with ExceptionSaver() as saved_exc: - yield saved - - sys.modules.update(saved) - # remove any modules imported since - del_modules = ( - mod_name for mod_name in sys.modules - if mod_name not in saved - # exclude any encodings modules. 
See #285 - and not mod_name.startswith('encodings.') - ) - _clear_modules(del_modules) - - saved_exc.resume() - - -def _clear_modules(module_names): - for mod_name in list(module_names): - del sys.modules[mod_name] - - -@contextlib.contextmanager -def save_pkg_resources_state(): - saved = pkg_resources.__getstate__() - try: - yield saved - finally: - pkg_resources.__setstate__(saved) - - -@contextlib.contextmanager -def setup_context(setup_dir): - temp_dir = os.path.join(setup_dir, 'temp') - with save_pkg_resources_state(): - with save_modules(): - hide_setuptools() - with save_path(): - with save_argv(): - with override_temp(temp_dir): - with pushd(setup_dir): - # ensure setuptools commands are available - __import__('setuptools') - yield - - -def _needs_hiding(mod_name): - """ - >>> _needs_hiding('setuptools') - True - >>> _needs_hiding('pkg_resources') - True - >>> _needs_hiding('setuptools_plugin') - False - >>> _needs_hiding('setuptools.__init__') - True - >>> _needs_hiding('distutils') - True - >>> _needs_hiding('os') - False - >>> _needs_hiding('Cython') - True - """ - pattern = re.compile(r'(setuptools|pkg_resources|distutils|Cython)(\.|$)') - return bool(pattern.match(mod_name)) - - -def hide_setuptools(): - """ - Remove references to setuptools' modules from sys.modules to allow the - invocation to import the most appropriate setuptools. This technique is - necessary to avoid issues such as #315 where setuptools upgrading itself - would fail to find a function declared in the metadata. - """ - modules = filter(_needs_hiding, sys.modules) - _clear_modules(modules) - - -def run_setup(setup_script, args): - """Run a distutils setup script, sandboxed in its directory""" - setup_dir = os.path.abspath(os.path.dirname(setup_script)) - with setup_context(setup_dir): - try: - sys.argv[:] = [setup_script] + list(args) - sys.path.insert(0, setup_dir) - # reset to include setup dir, w/clean callback list - working_set.__init__() - working_set.callbacks.append(lambda dist: dist.activate()) - - # __file__ should be a byte string on Python 2 (#712) - dunder_file = ( - setup_script - if isinstance(setup_script, str) else - setup_script.encode(sys.getfilesystemencoding()) - ) - - with DirectorySandbox(setup_dir): - ns = dict(__file__=dunder_file, __name__='__main__') - _execfile(setup_script, ns) - except SystemExit as v: - if v.args and v.args[0]: - raise - # Normal exit, just return - - -class AbstractSandbox: - """Wrap 'os' module and 'open()' builtin for virtualizing setup scripts""" - - _active = False - - def __init__(self): - self._attrs = [ - name for name in dir(_os) - if not name.startswith('_') and hasattr(self, name) - ] - - def _copy(self, source): - for name in self._attrs: - setattr(os, name, getattr(source, name)) - - def __enter__(self): - self._copy(self) - if _file: - builtins.file = self._file - builtins.open = self._open - self._active = True - - def __exit__(self, exc_type, exc_value, traceback): - self._active = False - if _file: - builtins.file = _file - builtins.open = _open - self._copy(_os) - - def run(self, func): - """Run 'func' under os sandboxing""" - with self: - return func() - - def _mk_dual_path_wrapper(name): - original = getattr(_os, name) - - def wrap(self, src, dst, *args, **kw): - if self._active: - src, dst = self._remap_pair(name, src, dst, *args, **kw) - return original(src, dst, *args, **kw) - - return wrap - - for name in ["rename", "link", "symlink"]: - if hasattr(_os, name): - locals()[name] = _mk_dual_path_wrapper(name) - - def 
_mk_single_path_wrapper(name, original=None): - original = original or getattr(_os, name) - - def wrap(self, path, *args, **kw): - if self._active: - path = self._remap_input(name, path, *args, **kw) - return original(path, *args, **kw) - - return wrap - - if _file: - _file = _mk_single_path_wrapper('file', _file) - _open = _mk_single_path_wrapper('open', _open) - for name in [ - "stat", "listdir", "chdir", "open", "chmod", "chown", "mkdir", - "remove", "unlink", "rmdir", "utime", "lchown", "chroot", "lstat", - "startfile", "mkfifo", "mknod", "pathconf", "access" - ]: - if hasattr(_os, name): - locals()[name] = _mk_single_path_wrapper(name) - - def _mk_single_with_return(name): - original = getattr(_os, name) - - def wrap(self, path, *args, **kw): - if self._active: - path = self._remap_input(name, path, *args, **kw) - return self._remap_output(name, original(path, *args, **kw)) - return original(path, *args, **kw) - - return wrap - - for name in ['readlink', 'tempnam']: - if hasattr(_os, name): - locals()[name] = _mk_single_with_return(name) - - def _mk_query(name): - original = getattr(_os, name) - - def wrap(self, *args, **kw): - retval = original(*args, **kw) - if self._active: - return self._remap_output(name, retval) - return retval - - return wrap - - for name in ['getcwd', 'tmpnam']: - if hasattr(_os, name): - locals()[name] = _mk_query(name) - - def _validate_path(self, path): - """Called to remap or validate any path, whether input or output""" - return path - - def _remap_input(self, operation, path, *args, **kw): - """Called for path inputs""" - return self._validate_path(path) - - def _remap_output(self, operation, path): - """Called for path outputs""" - return self._validate_path(path) - - def _remap_pair(self, operation, src, dst, *args, **kw): - """Called for path pairs like rename, link, and symlink operations""" - return ( - self._remap_input(operation + '-from', src, *args, **kw), - self._remap_input(operation + '-to', dst, *args, **kw) - ) - - -if hasattr(os, 'devnull'): - _EXCEPTIONS = [os.devnull,] -else: - _EXCEPTIONS = [] - - -class DirectorySandbox(AbstractSandbox): - """Restrict operations to a single subdirectory - pseudo-chroot""" - - write_ops = dict.fromkeys([ - "open", "chmod", "chown", "mkdir", "remove", "unlink", "rmdir", - "utime", "lchown", "chroot", "mkfifo", "mknod", "tempnam", - ]) - - _exception_patterns = [ - # Allow lib2to3 to attempt to save a pickled grammar object (#121) - r'.*lib2to3.*\.pickle$', - ] - "exempt writing to paths that match the pattern" - - def __init__(self, sandbox, exceptions=_EXCEPTIONS): - self._sandbox = os.path.normcase(os.path.realpath(sandbox)) - self._prefix = os.path.join(self._sandbox, '') - self._exceptions = [ - os.path.normcase(os.path.realpath(path)) - for path in exceptions - ] - AbstractSandbox.__init__(self) - - def _violation(self, operation, *args, **kw): - from setuptools.sandbox import SandboxViolation - raise SandboxViolation(operation, args, kw) - - if _file: - - def _file(self, path, mode='r', *args, **kw): - if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path): - self._violation("file", path, mode, *args, **kw) - return _file(path, mode, *args, **kw) - - def _open(self, path, mode='r', *args, **kw): - if mode not in ('r', 'rt', 'rb', 'rU', 'U') and not self._ok(path): - self._violation("open", path, mode, *args, **kw) - return _open(path, mode, *args, **kw) - - def tmpnam(self): - self._violation("tmpnam") - - def _ok(self, path): - active = self._active - try: - self._active = False - 
realpath = os.path.normcase(os.path.realpath(path)) - return ( - self._exempted(realpath) - or realpath == self._sandbox - or realpath.startswith(self._prefix) - ) - finally: - self._active = active - - def _exempted(self, filepath): - start_matches = ( - filepath.startswith(exception) - for exception in self._exceptions - ) - pattern_matches = ( - re.match(pattern, filepath) - for pattern in self._exception_patterns - ) - candidates = itertools.chain(start_matches, pattern_matches) - return any(candidates) - - def _remap_input(self, operation, path, *args, **kw): - """Called for path inputs""" - if operation in self.write_ops and not self._ok(path): - self._violation(operation, os.path.realpath(path), *args, **kw) - return path - - def _remap_pair(self, operation, src, dst, *args, **kw): - """Called for path pairs like rename, link, and symlink operations""" - if not self._ok(src) or not self._ok(dst): - self._violation(operation, src, dst, *args, **kw) - return (src, dst) - - def open(self, file, flags, mode=0o777, *args, **kw): - """Called for low-level os.open()""" - if flags & WRITE_FLAGS and not self._ok(file): - self._violation("os.open", file, flags, mode, *args, **kw) - return _os.open(file, flags, mode, *args, **kw) - - -WRITE_FLAGS = functools.reduce( - operator.or_, [getattr(_os, a, 0) for a in - "O_WRONLY O_RDWR O_APPEND O_CREAT O_TRUNC O_TEMPORARY".split()] -) - - -class SandboxViolation(DistutilsError): - """A setup script attempted to modify the filesystem outside the sandbox""" - - tmpl = textwrap.dedent(""" - SandboxViolation: {cmd}{args!r} {kwargs} - - The package setup script has attempted to modify files on your system - that are not within the EasyInstall build area, and has been aborted. - - This package cannot be safely installed by EasyInstall, and may not - support alternate installation locations even if you run its setup - script by hand. Please inform the package's author and the EasyInstall - maintainers to find out if a fix or workaround is available. 
- """).lstrip() - - def __str__(self): - cmd, args, kwargs = self.args - return self.tmpl.format(**locals()) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/script (dev).tmpl b/classifier/myenv/lib/python3.6/site-packages/setuptools/script (dev).tmpl deleted file mode 100644 index d58b1bb..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/script (dev).tmpl +++ /dev/null @@ -1,5 +0,0 @@ -# EASY-INSTALL-DEV-SCRIPT: %(spec)r,%(script_name)r -__requires__ = %(spec)r -__import__('pkg_resources').require(%(spec)r) -__file__ = %(dev_path)r -exec(compile(open(__file__).read(), __file__, 'exec')) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/script.tmpl b/classifier/myenv/lib/python3.6/site-packages/setuptools/script.tmpl deleted file mode 100644 index ff5efbc..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/script.tmpl +++ /dev/null @@ -1,3 +0,0 @@ -# EASY-INSTALL-SCRIPT: %(spec)r,%(script_name)r -__requires__ = %(spec)r -__import__('pkg_resources').run_script(%(spec)r, %(script_name)r) diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/site-patch.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/site-patch.py deleted file mode 100644 index 0d2d2ff..0000000 --- a/classifier/myenv/lib/python3.6/site-packages/setuptools/site-patch.py +++ /dev/null @@ -1,74 +0,0 @@ -def __boot(): - import sys - import os - PYTHONPATH = os.environ.get('PYTHONPATH') - if PYTHONPATH is None or (sys.platform == 'win32' and not PYTHONPATH): - PYTHONPATH = [] - else: - PYTHONPATH = PYTHONPATH.split(os.pathsep) - - pic = getattr(sys, 'path_importer_cache', {}) - stdpath = sys.path[len(PYTHONPATH):] - mydir = os.path.dirname(__file__) - - for item in stdpath: - if item == mydir or not item: - continue # skip if current dir. 
-        importer = pic.get(item)
-        if importer is not None:
-            loader = importer.find_module('site')
-            if loader is not None:
-                # This should actually reload the current module
-                loader.load_module('site')
-                break
-        else:
-            try:
-                import imp # Avoid import loop in Python >= 3.3
-                stream, path, descr = imp.find_module('site', [item])
-            except ImportError:
-                continue
-            if stream is None:
-                continue
-            try:
-                # This should actually reload the current module
-                imp.load_module('site', stream, path, descr)
-            finally:
-                stream.close()
-            break
-    else:
-        raise ImportError("Couldn't find the real 'site' module")
-
-    known_paths = dict([(makepath(item)[1], 1) for item in sys.path]) # 2.2 comp
-
-    oldpos = getattr(sys, '__egginsert', 0) # save old insertion position
-    sys.__egginsert = 0 # and reset the current one
-
-    for item in PYTHONPATH:
-        addsitedir(item)
-
-    sys.__egginsert += oldpos # restore effective old position
-
-    d, nd = makepath(stdpath[0])
-    insert_at = None
-    new_path = []
-
-    for item in sys.path:
-        p, np = makepath(item)
-
-        if np == nd and insert_at is None:
-            # We've hit the first 'system' path entry, so added entries go here
-            insert_at = len(new_path)
-
-        if np in known_paths or insert_at is None:
-            new_path.append(item)
-        else:
-            # new path after the insert point, back-insert it
-            new_path.insert(insert_at, item)
-            insert_at += 1
-
-    sys.path[:] = new_path
-
-
-if __name__ == 'site':
-    __boot()
-    del __boot
diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/ssl_support.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/ssl_support.py
deleted file mode 100644
index 6362f1f..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/setuptools/ssl_support.py
+++ /dev/null
@@ -1,260 +0,0 @@
-import os
-import socket
-import atexit
-import re
-import functools
-
-from setuptools.extern.six.moves import urllib, http_client, map, filter
-
-from pkg_resources import ResolutionError, ExtractionError
-
-try:
-    import ssl
-except ImportError:
-    ssl = None
-
-__all__ = [
-    'VerifyingHTTPSHandler', 'find_ca_bundle', 'is_available', 'cert_paths',
-    'opener_for'
-]
-
-cert_paths = """
-/etc/pki/tls/certs/ca-bundle.crt
-/etc/ssl/certs/ca-certificates.crt
-/usr/share/ssl/certs/ca-bundle.crt
-/usr/local/share/certs/ca-root.crt
-/etc/ssl/cert.pem
-/System/Library/OpenSSL/certs/cert.pem
-/usr/local/share/certs/ca-root-nss.crt
-/etc/ssl/ca-bundle.pem
-""".strip().split()
-
-try:
-    HTTPSHandler = urllib.request.HTTPSHandler
-    HTTPSConnection = http_client.HTTPSConnection
-except AttributeError:
-    HTTPSHandler = HTTPSConnection = object
-
-is_available = ssl is not None and object not in (HTTPSHandler, HTTPSConnection)
-
-
-try:
-    from ssl import CertificateError, match_hostname
-except ImportError:
-    try:
-        from backports.ssl_match_hostname import CertificateError
-        from backports.ssl_match_hostname import match_hostname
-    except ImportError:
-        CertificateError = None
-        match_hostname = None
-
-if not CertificateError:
-
-    class CertificateError(ValueError):
-        pass
-
-
-if not match_hostname:
-
-    def _dnsname_match(dn, hostname, max_wildcards=1):
-        """Matching according to RFC 6125, section 6.4.3
-
-        http://tools.ietf.org/html/rfc6125#section-6.4.3
-        """
-        pats = []
-        if not dn:
-            return False
-
-        # Ported from python3-syntax:
-        # leftmost, *remainder = dn.split(r'.')
-        parts = dn.split(r'.')
-        leftmost = parts[0]
-        remainder = parts[1:]
-
-        wildcards = leftmost.count('*')
-        if wildcards > max_wildcards:
-            # Issue #17980: avoid denials of service by refusing more
-            # than one wildcard per fragment. A survey of established
-            # policy among SSL implementations showed it to be a
-            # reasonable choice.
-            raise CertificateError(
-                "too many wildcards in certificate DNS name: " + repr(dn))
-
-        # speed up common case w/o wildcards
-        if not wildcards:
-            return dn.lower() == hostname.lower()
-
-        # RFC 6125, section 6.4.3, subitem 1.
-        # The client SHOULD NOT attempt to match a presented identifier in which
-        # the wildcard character comprises a label other than the left-most label.
-        if leftmost == '*':
-            # When '*' is a fragment by itself, it matches a non-empty dotless
-            # fragment.
-            pats.append('[^.]+')
-        elif leftmost.startswith('xn--') or hostname.startswith('xn--'):
-            # RFC 6125, section 6.4.3, subitem 3.
-            # The client SHOULD NOT attempt to match a presented identifier
-            # where the wildcard character is embedded within an A-label or
-            # U-label of an internationalized domain name.
-            pats.append(re.escape(leftmost))
-        else:
-            # Otherwise, '*' matches any dotless string, e.g. www*
-            pats.append(re.escape(leftmost).replace(r'\*', '[^.]*'))
-
-        # add the remaining fragments, ignore any wildcards
-        for frag in remainder:
-            pats.append(re.escape(frag))
-
-        pat = re.compile(r'\A' + r'\.'.join(pats) + r'\Z', re.IGNORECASE)
-        return pat.match(hostname)
-
-    def match_hostname(cert, hostname):
-        """Verify that *cert* (in decoded format as returned by
-        SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
-        rules are followed, but IP addresses are not accepted for *hostname*.
-
-        CertificateError is raised on failure. On success, the function
-        returns nothing.
-        """
-        if not cert:
-            raise ValueError("empty or no certificate")
-        dnsnames = []
-        san = cert.get('subjectAltName', ())
-        for key, value in san:
-            if key == 'DNS':
-                if _dnsname_match(value, hostname):
-                    return
-                dnsnames.append(value)
-        if not dnsnames:
-            # The subject is only checked when there is no dNSName entry
-            # in subjectAltName
-            for sub in cert.get('subject', ()):
-                for key, value in sub:
-                    # XXX according to RFC 2818, the most specific Common Name
-                    # must be used.
-                    if key == 'commonName':
-                        if _dnsname_match(value, hostname):
-                            return
-                        dnsnames.append(value)
-        if len(dnsnames) > 1:
-            raise CertificateError("hostname %r "
-                "doesn't match either of %s"
-                % (hostname, ', '.join(map(repr, dnsnames))))
-        elif len(dnsnames) == 1:
-            raise CertificateError("hostname %r "
-                "doesn't match %r"
-                % (hostname, dnsnames[0]))
-        else:
-            raise CertificateError("no appropriate commonName or "
-                "subjectAltName fields were found")
-
-
-class VerifyingHTTPSHandler(HTTPSHandler):
-    """Simple verifying handler: no auth, subclasses, timeouts, etc."""
-
-    def __init__(self, ca_bundle):
-        self.ca_bundle = ca_bundle
-        HTTPSHandler.__init__(self)
-
-    def https_open(self, req):
-        return self.do_open(
-            lambda host, **kw: VerifyingHTTPSConn(host, self.ca_bundle, **kw), req
-        )
-
-
-class VerifyingHTTPSConn(HTTPSConnection):
-    """Simple verifying connection: no auth, subclasses, timeouts, etc."""
-
-    def __init__(self, host, ca_bundle, **kw):
-        HTTPSConnection.__init__(self, host, **kw)
-        self.ca_bundle = ca_bundle
-
-    def connect(self):
-        sock = socket.create_connection(
-            (self.host, self.port), getattr(self, 'source_address', None)
-        )
-
-        # Handle the socket if a (proxy) tunnel is present
-        if hasattr(self, '_tunnel') and getattr(self, '_tunnel_host', None):
-            self.sock = sock
-            self._tunnel()
-            # http://bugs.python.org/issue7776: Python>=3.4.1 and >=2.7.7
-            # change self.host to mean the proxy server host when tunneling is
-            # being used. Adapt, since we are interested in the destination
-            # host for the match_hostname() comparison.
-            actual_host = self._tunnel_host
-        else:
-            actual_host = self.host
-
-        if hasattr(ssl, 'create_default_context'):
-            ctx = ssl.create_default_context(cafile=self.ca_bundle)
-            self.sock = ctx.wrap_socket(sock, server_hostname=actual_host)
-        else:
-            # This is for python < 2.7.9 and < 3.4?
-            self.sock = ssl.wrap_socket(
-                sock, cert_reqs=ssl.CERT_REQUIRED, ca_certs=self.ca_bundle
-            )
-        try:
-            match_hostname(self.sock.getpeercert(), actual_host)
-        except CertificateError:
-            self.sock.shutdown(socket.SHUT_RDWR)
-            self.sock.close()
-            raise
-
-
-def opener_for(ca_bundle=None):
-    """Get a urlopen() replacement that uses ca_bundle for verification"""
-    return urllib.request.build_opener(
-        VerifyingHTTPSHandler(ca_bundle or find_ca_bundle())
-    ).open
-
-
-# from jaraco.functools
-def once(func):
-    @functools.wraps(func)
-    def wrapper(*args, **kwargs):
-        if not hasattr(func, 'always_returns'):
-            func.always_returns = func(*args, **kwargs)
-        return func.always_returns
-    return wrapper
-
-
-@once
-def get_win_certfile():
-    try:
-        import wincertstore
-    except ImportError:
-        return None
-
-    class CertFile(wincertstore.CertFile):
-        def __init__(self):
-            super(CertFile, self).__init__()
-            atexit.register(self.close)
-
-        def close(self):
-            try:
-                super(CertFile, self).close()
-            except OSError:
-                pass
-
-    _wincerts = CertFile()
-    _wincerts.addstore('CA')
-    _wincerts.addstore('ROOT')
-    return _wincerts.name
-
-
-def find_ca_bundle():
-    """Return an existing CA bundle path, or None"""
-    extant_cert_paths = filter(os.path.isfile, cert_paths)
-    return (
-        get_win_certfile()
-        or next(extant_cert_paths, None)
-        or _certifi_where()
-    )
-
-
-def _certifi_where():
-    try:
-        return __import__('certifi').where()
-    except (ImportError, ResolutionError, ExtractionError):
-        pass
diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/unicode_utils.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/unicode_utils.py
deleted file mode 100644
index 7c63efd..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/setuptools/unicode_utils.py
+++ /dev/null
@@ -1,44 +0,0 @@
-import unicodedata
-import sys
-
-from setuptools.extern import six
-
-
-# HFS Plus uses decomposed UTF-8
-def decompose(path):
-    if isinstance(path, six.text_type):
-        return unicodedata.normalize('NFD', path)
-    try:
-        path = path.decode('utf-8')
-        path = unicodedata.normalize('NFD', path)
-        path = path.encode('utf-8')
-    except UnicodeError:
-        pass # Not UTF-8
-    return path
-
-
-def filesys_decode(path):
-    """
-    Ensure that the given path is decoded,
-    NONE when no expected encoding works
-    """
-
-    if isinstance(path, six.text_type):
-        return path
-
-    fs_enc = sys.getfilesystemencoding() or 'utf-8'
-    candidates = fs_enc, 'utf-8'
-
-    for enc in candidates:
-        try:
-            return path.decode(enc)
-        except UnicodeDecodeError:
-            continue
-
-
-def try_encode(string, enc):
-    "turn unicode encoding into a functional routine"
-    try:
-        return string.encode(enc)
-    except UnicodeEncodeError:
-        return None
diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/version.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/version.py
deleted file mode 100644
index 95e1869..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/setuptools/version.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import pkg_resources
-
-try:
-    __version__ = pkg_resources.get_distribution('setuptools').version
-except Exception:
-    __version__ = 'unknown'
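The ssl_support.py module removed above carries a small memoizing decorator, once, borrowed from jaraco.functools: the first call's return value is cached on the function object and returned on every later call. A minimal sketch of the same pattern; expensive_lookup is an illustrative stand-in, not part of the deleted code:

    import functools

    def once(func):
        # Cache the first return value on the function object and reuse it
        # for every later call (the same trick as in the deleted ssl_support.py).
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if not hasattr(func, 'always_returns'):
                func.always_returns = func(*args, **kwargs)
            return func.always_returns
        return wrapper

    @once
    def expensive_lookup():
        print('computed once')
        return 42

    expensive_lookup()  # prints 'computed once', returns 42
    expensive_lookup()  # returns the cached 42 without recomputing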
diff --git a/classifier/myenv/lib/python3.6/site-packages/setuptools/wheel.py b/classifier/myenv/lib/python3.6/site-packages/setuptools/wheel.py
deleted file mode 100644
index 37dfa53..0000000
--- a/classifier/myenv/lib/python3.6/site-packages/setuptools/wheel.py
+++ /dev/null
@@ -1,163 +0,0 @@
-'''Wheels support.'''
-
-from distutils.util import get_platform
-import email
-import itertools
-import os
-import re
-import zipfile
-
-from pkg_resources import Distribution, PathMetadata, parse_version
-from setuptools.extern.six import PY3
-from setuptools import Distribution as SetuptoolsDistribution
-from setuptools import pep425tags
-from setuptools.command.egg_info import write_requirements
-
-
-WHEEL_NAME = re.compile(
-    r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
-    ((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
-    )\.whl$""",
-re.VERBOSE).match
-
-NAMESPACE_PACKAGE_INIT = '''\
-try:
-    __import__('pkg_resources').declare_namespace(__name__)
-except ImportError:
-    __path__ = __import__('pkgutil').extend_path(__path__, __name__)
-'''
-
-
-def unpack(src_dir, dst_dir):
-    '''Move everything under `src_dir` to `dst_dir`, and delete the former.'''
-    for dirpath, dirnames, filenames in os.walk(src_dir):
-        subdir = os.path.relpath(dirpath, src_dir)
-        for f in filenames:
-            src = os.path.join(dirpath, f)
-            dst = os.path.join(dst_dir, subdir, f)
-            os.renames(src, dst)
-        for n, d in reversed(list(enumerate(dirnames))):
-            src = os.path.join(dirpath, d)
-            dst = os.path.join(dst_dir, subdir, d)
-            if not os.path.exists(dst):
-                # Directory does not exist in destination,
-                # rename it and prune it from os.walk list.
-                os.renames(src, dst)
-                del dirnames[n]
-    # Cleanup.
-    for dirpath, dirnames, filenames in os.walk(src_dir, topdown=True):
-        assert not filenames
-        os.rmdir(dirpath)
-
-
-class Wheel(object):
-
-    def __init__(self, filename):
-        match = WHEEL_NAME(os.path.basename(filename))
-        if match is None:
-            raise ValueError('invalid wheel name: %r' % filename)
-        self.filename = filename
-        for k, v in match.groupdict().items():
-            setattr(self, k, v)
-
-    def tags(self):
-        '''List tags (py_version, abi, platform) supported by this wheel.'''
-        return itertools.product(self.py_version.split('.'),
-                                 self.abi.split('.'),
-                                 self.platform.split('.'))
-
-    def is_compatible(self):
-        '''Is the wheel is compatible with the current platform?'''
-        supported_tags = pep425tags.get_supported()
-        return next((True for t in self.tags() if t in supported_tags), False)
-
-    def egg_name(self):
-        return Distribution(
-            project_name=self.project_name, version=self.version,
-            platform=(None if self.platform == 'any' else get_platform()),
-        ).egg_name() + '.egg'
-
-    def install_as_egg(self, destination_eggdir):
-        '''Install wheel as an egg directory.'''
-        with zipfile.ZipFile(self.filename) as zf:
-            dist_basename = '%s-%s' % (self.project_name, self.version)
-            dist_info = '%s.dist-info' % dist_basename
-            dist_data = '%s.data' % dist_basename
-            def get_metadata(name):
-                with zf.open('%s/%s' % (dist_info, name)) as fp:
-                    value = fp.read().decode('utf-8') if PY3 else fp.read()
-                    return email.parser.Parser().parsestr(value)
-            wheel_metadata = get_metadata('WHEEL')
-            dist_metadata = get_metadata('METADATA')
-            # Check wheel format version is supported.
-            wheel_version = parse_version(wheel_metadata.get('Wheel-Version'))
-            if not parse_version('1.0') <= wheel_version < parse_version('2.0dev0'):
-                raise ValueError('unsupported wheel format version: %s' % wheel_version)
-            # Extract to target directory.
-            os.mkdir(destination_eggdir)
-            zf.extractall(destination_eggdir)
-            # Convert metadata.
-            dist_info = os.path.join(destination_eggdir, dist_info)
-            dist = Distribution.from_location(
-                destination_eggdir, dist_info,
-                metadata=PathMetadata(destination_eggdir, dist_info)
-            )
-            # Note: we need to evaluate and strip markers now,
-            # as we can't easily convert back from the syntax:
-            # foobar; "linux" in sys_platform and extra == 'test'
-            def raw_req(req):
-                req.marker = None
-                return str(req)
-            install_requires = list(sorted(map(raw_req, dist.requires())))
-            extras_require = {
-                extra: list(sorted(
-                    req
-                    for req in map(raw_req, dist.requires((extra,)))
-                    if req not in install_requires
-                ))
-                for extra in dist.extras
-            }
-            egg_info = os.path.join(destination_eggdir, 'EGG-INFO')
-            os.rename(dist_info, egg_info)
-            os.rename(os.path.join(egg_info, 'METADATA'),
-                      os.path.join(egg_info, 'PKG-INFO'))
-            setup_dist = SetuptoolsDistribution(attrs=dict(
-                install_requires=install_requires,
-                extras_require=extras_require,
-            ))
-            write_requirements(setup_dist.get_command_obj('egg_info'),
-                               None, os.path.join(egg_info, 'requires.txt'))
-            # Move data entries to their correct location.
-            dist_data = os.path.join(destination_eggdir, dist_data)
-            dist_data_scripts = os.path.join(dist_data, 'scripts')
-            if os.path.exists(dist_data_scripts):
-                egg_info_scripts = os.path.join(destination_eggdir,
-                                                'EGG-INFO', 'scripts')
-                os.mkdir(egg_info_scripts)
-                for entry in os.listdir(dist_data_scripts):
-                    # Remove bytecode, as it's not properly handled
-                    # during easy_install scripts install phase.
-                    if entry.endswith('.pyc'):
-                        os.unlink(os.path.join(dist_data_scripts, entry))
-                    else:
-                        os.rename(os.path.join(dist_data_scripts, entry),
-                                  os.path.join(egg_info_scripts, entry))
-                os.rmdir(dist_data_scripts)
-            for subdir in filter(os.path.exists, (
-                os.path.join(dist_data, d)
-                for d in ('data', 'headers', 'purelib', 'platlib')
-            )):
-                unpack(subdir, destination_eggdir)
-            if os.path.exists(dist_data):
-                os.rmdir(dist_data)
-            # Fix namespace packages.
-            namespace_packages = os.path.join(egg_info, 'namespace_packages.txt')
-            if os.path.exists(namespace_packages):
-                with open(namespace_packages) as fp:
-                    namespace_packages = fp.read().split()
-                for mod in namespace_packages:
-                    mod_dir = os.path.join(destination_eggdir, *mod.split('.'))
-                    mod_init = os.path.join(mod_dir, '__init__.py')
-                    if os.path.exists(mod_dir) and not os.path.exists(mod_init):
-                        with open(mod_init, 'w') as fp:
-                            fp.write(NAMESPACE_PACKAGE_INIT)
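The deleted wheel.py above drives its Wheel class off the WHEEL_NAME regex, which splits a wheel filename into the project name, version, optional build tag, and the python/abi/platform compatibility tags defined by PEP 427. A small self-contained sketch of that parse, reusing the same regex and trying it on one of the wheel files removed later in this same diff:

    import re

    # Same pattern as WHEEL_NAME in the deleted wheel.py (PEP 427 layout:
    # name-version(-build)?-pyTag-abiTag-platformTag.whl).
    WHEEL_NAME = re.compile(
        r"""^(?P<project_name>.+?)-(?P<version>\d.*?)
        ((-(?P<build>\d.*?))?-(?P<py_version>.+?)-(?P<abi>.+?)-(?P<platform>.+?)
        )\.whl$""",
        re.VERBOSE).match

    m = WHEEL_NAME('requests-2.18.4-py2.py3-none-any.whl')
    print(m.group('project_name'))  # requests
    print(m.group('version'))       # 2.18.4
    print(m.group('py_version'))    # py2.py3
    print(m.group('abi'))           # none
    print(m.group('platform'))      # any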
- """ - __import__('ctypes.wintypes') - SetFileAttributes = ctypes.windll.kernel32.SetFileAttributesW - SetFileAttributes.argtypes = ctypes.wintypes.LPWSTR, ctypes.wintypes.DWORD - SetFileAttributes.restype = ctypes.wintypes.BOOL - - FILE_ATTRIBUTE_HIDDEN = 0x02 - - ret = SetFileAttributes(path, FILE_ATTRIBUTE_HIDDEN) - if not ret: - raise ctypes.WinError() diff --git a/classifier/myenv/lib64 b/classifier/myenv/lib64 deleted file mode 120000 index 7951405..0000000 --- a/classifier/myenv/lib64 +++ /dev/null @@ -1 +0,0 @@ -lib \ No newline at end of file diff --git a/classifier/myenv/pyvenv.cfg b/classifier/myenv/pyvenv.cfg deleted file mode 100644 index 12a52df..0000000 --- a/classifier/myenv/pyvenv.cfg +++ /dev/null @@ -1,3 +0,0 @@ -home = /usr/bin -include-system-site-packages = false -version = 3.6.7 diff --git a/classifier/myenv/share/python-wheels/CacheControl-0.11.7-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/CacheControl-0.11.7-py2.py3-none-any.whl deleted file mode 100644 index 186f75a..0000000 Binary files a/classifier/myenv/share/python-wheels/CacheControl-0.11.7-py2.py3-none-any.whl and /dev/null differ diff --git a/classifier/myenv/share/python-wheels/appdirs-1.4.3-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/appdirs-1.4.3-py2.py3-none-any.whl deleted file mode 100644 index 14159df..0000000 Binary files a/classifier/myenv/share/python-wheels/appdirs-1.4.3-py2.py3-none-any.whl and /dev/null differ diff --git a/classifier/myenv/share/python-wheels/certifi-2018.1.18-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/certifi-2018.1.18-py2.py3-none-any.whl deleted file mode 100644 index 8914172..0000000 Binary files a/classifier/myenv/share/python-wheels/certifi-2018.1.18-py2.py3-none-any.whl and /dev/null differ diff --git a/classifier/myenv/share/python-wheels/chardet-3.0.4-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/chardet-3.0.4-py2.py3-none-any.whl deleted file mode 100644 index 7e51f48..0000000 Binary files a/classifier/myenv/share/python-wheels/chardet-3.0.4-py2.py3-none-any.whl and /dev/null differ diff --git a/classifier/myenv/share/python-wheels/colorama-0.3.7-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/colorama-0.3.7-py2.py3-none-any.whl deleted file mode 100644 index 7e14f3f..0000000 Binary files a/classifier/myenv/share/python-wheels/colorama-0.3.7-py2.py3-none-any.whl and /dev/null differ diff --git a/classifier/myenv/share/python-wheels/distlib-0.2.6-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/distlib-0.2.6-py2.py3-none-any.whl deleted file mode 100644 index 2dc0f91..0000000 Binary files a/classifier/myenv/share/python-wheels/distlib-0.2.6-py2.py3-none-any.whl and /dev/null differ diff --git a/classifier/myenv/share/python-wheels/distro-1.0.1-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/distro-1.0.1-py2.py3-none-any.whl deleted file mode 100644 index 8270abd..0000000 Binary files a/classifier/myenv/share/python-wheels/distro-1.0.1-py2.py3-none-any.whl and /dev/null differ diff --git a/classifier/myenv/share/python-wheels/html5lib-0.999999999-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/html5lib-0.999999999-py2.py3-none-any.whl deleted file mode 100644 index 62e141a..0000000 Binary files a/classifier/myenv/share/python-wheels/html5lib-0.999999999-py2.py3-none-any.whl and /dev/null differ diff --git a/classifier/myenv/share/python-wheels/idna-2.6-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/idna-2.6-py2.py3-none-any.whl deleted file mode 
index 77386b3..0000000
Binary files a/classifier/myenv/share/python-wheels/idna-2.6-py2.py3-none-any.whl and /dev/null differ
diff --git a/classifier/myenv/share/python-wheels/ipaddress-0.0.0-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/ipaddress-0.0.0-py2.py3-none-any.whl
deleted file mode 100644
index e0e2371..0000000
Binary files a/classifier/myenv/share/python-wheels/ipaddress-0.0.0-py2.py3-none-any.whl and /dev/null differ
diff --git a/classifier/myenv/share/python-wheels/lockfile-0.12.2-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/lockfile-0.12.2-py2.py3-none-any.whl
deleted file mode 100644
index add2bc6..0000000
Binary files a/classifier/myenv/share/python-wheels/lockfile-0.12.2-py2.py3-none-any.whl and /dev/null differ
diff --git a/classifier/myenv/share/python-wheels/packaging-17.1-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/packaging-17.1-py2.py3-none-any.whl
deleted file mode 100644
index 23ca472..0000000
Binary files a/classifier/myenv/share/python-wheels/packaging-17.1-py2.py3-none-any.whl and /dev/null differ
diff --git a/classifier/myenv/share/python-wheels/pip-9.0.1-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/pip-9.0.1-py2.py3-none-any.whl
deleted file mode 100644
index 60340e5..0000000
Binary files a/classifier/myenv/share/python-wheels/pip-9.0.1-py2.py3-none-any.whl and /dev/null differ
diff --git a/classifier/myenv/share/python-wheels/pkg_resources-0.0.0-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/pkg_resources-0.0.0-py2.py3-none-any.whl
deleted file mode 100644
index b39e270..0000000
Binary files a/classifier/myenv/share/python-wheels/pkg_resources-0.0.0-py2.py3-none-any.whl and /dev/null differ
diff --git a/classifier/myenv/share/python-wheels/progress-1.2-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/progress-1.2-py2.py3-none-any.whl
deleted file mode 100644
index 7a056dd..0000000
Binary files a/classifier/myenv/share/python-wheels/progress-1.2-py2.py3-none-any.whl and /dev/null differ
diff --git a/classifier/myenv/share/python-wheels/pyparsing-2.2.0-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/pyparsing-2.2.0-py2.py3-none-any.whl
deleted file mode 100644
index 6bb4728..0000000
Binary files a/classifier/myenv/share/python-wheels/pyparsing-2.2.0-py2.py3-none-any.whl and /dev/null differ
diff --git a/classifier/myenv/share/python-wheels/requests-2.18.4-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/requests-2.18.4-py2.py3-none-any.whl
deleted file mode 100644
index 141f077..0000000
Binary files a/classifier/myenv/share/python-wheels/requests-2.18.4-py2.py3-none-any.whl and /dev/null differ
diff --git a/classifier/myenv/share/python-wheels/retrying-1.3.3-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/retrying-1.3.3-py2.py3-none-any.whl
deleted file mode 100644
index 3eece01..0000000
Binary files a/classifier/myenv/share/python-wheels/retrying-1.3.3-py2.py3-none-any.whl and /dev/null differ
diff --git a/classifier/myenv/share/python-wheels/setuptools-39.0.1-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/setuptools-39.0.1-py2.py3-none-any.whl
deleted file mode 100644
index d42fe7b..0000000
Binary files a/classifier/myenv/share/python-wheels/setuptools-39.0.1-py2.py3-none-any.whl and /dev/null differ
diff --git a/classifier/myenv/share/python-wheels/six-1.11.0-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/six-1.11.0-py2.py3-none-any.whl
deleted file mode 100644
index 273e49c..0000000
Binary files a/classifier/myenv/share/python-wheels/six-1.11.0-py2.py3-none-any.whl and /dev/null differ
diff --git a/classifier/myenv/share/python-wheels/urllib3-1.22-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/urllib3-1.22-py2.py3-none-any.whl
deleted file mode 100644
index dbda3b7..0000000
Binary files a/classifier/myenv/share/python-wheels/urllib3-1.22-py2.py3-none-any.whl and /dev/null differ
diff --git a/classifier/myenv/share/python-wheels/webencodings-0.5-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/webencodings-0.5-py2.py3-none-any.whl
deleted file mode 100644
index 6ec1c14..0000000
Binary files a/classifier/myenv/share/python-wheels/webencodings-0.5-py2.py3-none-any.whl and /dev/null differ
diff --git a/classifier/myenv/share/python-wheels/wheel-0.30.0-py2.py3-none-any.whl b/classifier/myenv/share/python-wheels/wheel-0.30.0-py2.py3-none-any.whl
deleted file mode 100644
index 61aeac6..0000000
Binary files a/classifier/myenv/share/python-wheels/wheel-0.30.0-py2.py3-none-any.whl and /dev/null differ
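Everything removed above (the vendored setuptools sources, pyvenv.cfg, the lib64 symlink, and the cached wheels) is the machinery of a virtual environment that had been committed to the repository. Such an environment is normally recreated per machine rather than versioned; a minimal sketch of that workflow, assuming a pinned requirements.txt that this repository may or may not provide:

    import subprocess
    import sys

    # Recreate the 'myenv' virtual environment with the stdlib venv module.
    subprocess.run([sys.executable, '-m', 'venv', 'myenv'], check=True)

    # Install dependencies into it (POSIX layout; on Windows the interpreter
    # lives at myenv\Scripts\python.exe instead of myenv/bin/python).
    subprocess.run(
        ['myenv/bin/python', '-m', 'pip', 'install', '-r', 'requirements.txt'],
        check=True,
    )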