From f531b4c05cc3f8eb8bb18997eede3993ef4779cd Mon Sep 17 00:00:00 2001
From: su-fang
Date: Fri, 10 Feb 2023 13:58:37 +0800
Subject: [PATCH] Import Upstream version 1.4.0

---
 .flake8 | 28 + .gitattributes | 4 + .github/ISSUE_TEMPLATE/config.yml | 12 + .github/PULL_REQUEST_TEMPLATE.md | 13 + .github/workflows/downstream.yml | 76 + .github/workflows/integration.yml | 32 + .github/workflows/release.yml | 52 + .github/workflows/tests.yml | 72 + .gitignore | 41 + .pre-commit-config.yaml | 87 + CHANGELOG.md | 476 ++ LICENSE | 20 + Makefile | 30 + README.md | 45 + poetry.lock | 917 +++ pyproject.toml | 121 + src/poetry/core/__init__.py | 15 + .../core/_vendor/_pyrsistent_version.py | 1 + src/poetry/core/_vendor/attr/__init__.py | 79 + src/poetry/core/_vendor/attr/_cmp.py | 155 + src/poetry/core/_vendor/attr/_compat.py | 185 + src/poetry/core/_vendor/attr/_config.py | 31 + src/poetry/core/_vendor/attr/_funcs.py | 420 ++ src/poetry/core/_vendor/attr/_make.py | 3006 +++++++++ src/poetry/core/_vendor/attr/_next_gen.py | 220 + src/poetry/core/_vendor/attr/_version_info.py | 86 + src/poetry/core/_vendor/attr/converters.py | 144 + src/poetry/core/_vendor/attr/exceptions.py | 92 + src/poetry/core/_vendor/attr/filters.py | 51 + src/poetry/core/_vendor/attr/py.typed | 0 src/poetry/core/_vendor/attr/setters.py | 73 + src/poetry/core/_vendor/attr/validators.py | 594 ++ src/poetry/core/_vendor/attrs/LICENSE | 21 + src/poetry/core/_vendor/attrs/__init__.py | 70 + src/poetry/core/_vendor/attrs/converters.py | 3 + src/poetry/core/_vendor/attrs/exceptions.py | 3 + src/poetry/core/_vendor/attrs/filters.py | 3 + src/poetry/core/_vendor/attrs/py.typed | 0 src/poetry/core/_vendor/attrs/setters.py | 3 + src/poetry/core/_vendor/attrs/validators.py | 3 + src/poetry/core/_vendor/jsonschema/COPYING | 19 + .../core/_vendor/jsonschema/__init__.py | 71 + .../core/_vendor/jsonschema/__main__.py | 3 + src/poetry/core/_vendor/jsonschema/_format.py | 513 ++ .../_vendor/jsonschema/_legacy_validators.py | 319 + src/poetry/core/_vendor/jsonschema/_types.py | 203 + src/poetry/core/_vendor/jsonschema/_utils.py | 345 + .../core/_vendor/jsonschema/_validators.py | 467 ++ .../_vendor/jsonschema/benchmarks/__init__.py | 5 + .../_vendor/jsonschema/benchmarks/issue232.py | 25 + .../jsonschema/benchmarks/issue232/issue.json | 2653 ++++++++ .../benchmarks/json_schema_test_suite.py | 12 + src/poetry/core/_vendor/jsonschema/cli.py | 299 + .../core/_vendor/jsonschema/exceptions.py | 396 ++ .../core/_vendor/jsonschema/protocols.py | 224 + .../jsonschema/schemas/draft2019-09.json | 42 + .../jsonschema/schemas/draft2020-12.json | 58 + .../_vendor/jsonschema/schemas/draft3.json | 172 + .../_vendor/jsonschema/schemas/draft4.json | 149 + .../_vendor/jsonschema/schemas/draft6.json | 153 + .../_vendor/jsonschema/schemas/draft7.json | 166 + .../jsonschema/schemas/vocabularies.json | 1 + .../core/_vendor/jsonschema/validators.py | 1156 ++++ src/poetry/core/_vendor/lark/LICENSE | 18 + src/poetry/core/_vendor/lark/__init__.py | 38 + .../_vendor/lark/__pyinstaller/__init__.py | 6 + .../_vendor/lark/__pyinstaller/hook-lark.py | 14 + src/poetry/core/_vendor/lark/ast_utils.py | 59 + src/poetry/core/_vendor/lark/common.py | 82 + src/poetry/core/_vendor/lark/exceptions.py | 292 + src/poetry/core/_vendor/lark/grammar.py | 122 + .../core/_vendor/lark/grammars/__init__.py | 0 .../core/_vendor/lark/grammars/common.lark | 59 + .../core/_vendor/lark/grammars/lark.lark | 59 + .../core/_vendor/lark/grammars/python.lark | 304 + .../core/_vendor/lark/grammars/unicode.lark
| 7 + src/poetry/core/_vendor/lark/indenter.py | 112 + src/poetry/core/_vendor/lark/lark.py | 648 ++ src/poetry/core/_vendor/lark/lexer.py | 603 ++ src/poetry/core/_vendor/lark/load_grammar.py | 1423 ++++ .../core/_vendor/lark/parse_tree_builder.py | 387 ++ .../core/_vendor/lark/parser_frontends.py | 245 + .../core/_vendor/lark/parsers/__init__.py | 0 src/poetry/core/_vendor/lark/parsers/cyk.py | 345 + .../core/_vendor/lark/parsers/earley.py | 295 + .../_vendor/lark/parsers/earley_common.py | 42 + .../_vendor/lark/parsers/earley_forest.py | 804 +++ .../_vendor/lark/parsers/grammar_analysis.py | 185 + .../_vendor/lark/parsers/lalr_analysis.py | 303 + .../lark/parsers/lalr_interactive_parser.py | 148 + .../core/_vendor/lark/parsers/lalr_parser.py | 199 + .../_vendor/lark/parsers/resolve_ambig.py | 109 + .../core/_vendor/lark/parsers/xearley.py | 159 + src/poetry/core/_vendor/lark/py.typed | 0 src/poetry/core/_vendor/lark/reconstruct.py | 106 + .../core/_vendor/lark/tools/__init__.py | 64 + src/poetry/core/_vendor/lark/tools/nearley.py | 202 + .../core/_vendor/lark/tools/serialize.py | 34 + .../core/_vendor/lark/tools/standalone.py | 194 + src/poetry/core/_vendor/lark/tree.py | 263 + src/poetry/core/_vendor/lark/tree_matcher.py | 186 + .../core/_vendor/lark/tree_templates.py | 180 + src/poetry/core/_vendor/lark/utils.py | 339 + src/poetry/core/_vendor/lark/visitors.py | 587 ++ src/poetry/core/_vendor/packaging/LICENSE | 3 + .../core/_vendor/packaging/LICENSE.APACHE | 177 + src/poetry/core/_vendor/packaging/LICENSE.BSD | 23 + .../core/_vendor/packaging/__about__.py | 26 + src/poetry/core/_vendor/packaging/__init__.py | 25 + .../core/_vendor/packaging/_manylinux.py | 301 + .../core/_vendor/packaging/_musllinux.py | 136 + .../core/_vendor/packaging/_structures.py | 61 + src/poetry/core/_vendor/packaging/markers.py | 304 + src/poetry/core/_vendor/packaging/py.typed | 0 .../core/_vendor/packaging/requirements.py | 146 + .../core/_vendor/packaging/specifiers.py | 802 +++ src/poetry/core/_vendor/packaging/tags.py | 487 ++ src/poetry/core/_vendor/packaging/utils.py | 136 + src/poetry/core/_vendor/packaging/version.py | 504 ++ src/poetry/core/_vendor/pyparsing/LICENSE | 18 + src/poetry/core/_vendor/pyparsing/__init__.py | 331 + src/poetry/core/_vendor/pyparsing/actions.py | 207 + src/poetry/core/_vendor/pyparsing/common.py | 424 ++ src/poetry/core/_vendor/pyparsing/core.py | 5814 +++++++++++++++++ .../_vendor/pyparsing/diagram/__init__.py | 642 ++ .../core/_vendor/pyparsing/exceptions.py | 267 + src/poetry/core/_vendor/pyparsing/helpers.py | 1088 +++ src/poetry/core/_vendor/pyparsing/py.typed | 0 src/poetry/core/_vendor/pyparsing/results.py | 760 +++ src/poetry/core/_vendor/pyparsing/testing.py | 331 + src/poetry/core/_vendor/pyparsing/unicode.py | 352 + src/poetry/core/_vendor/pyparsing/util.py | 235 + .../core/_vendor/pyrsistent/LICENSE.mit | 22 + .../core/_vendor/pyrsistent/__init__.py | 47 + .../core/_vendor/pyrsistent/_checked_types.py | 542 ++ .../core/_vendor/pyrsistent/_field_common.py | 332 + .../core/_vendor/pyrsistent/_helpers.py | 97 + .../core/_vendor/pyrsistent/_immutable.py | 103 + src/poetry/core/_vendor/pyrsistent/_pbag.py | 267 + src/poetry/core/_vendor/pyrsistent/_pclass.py | 262 + src/poetry/core/_vendor/pyrsistent/_pdeque.py | 376 ++ src/poetry/core/_vendor/pyrsistent/_plist.py | 313 + src/poetry/core/_vendor/pyrsistent/_pmap.py | 576 ++ .../core/_vendor/pyrsistent/_precord.py | 167 + src/poetry/core/_vendor/pyrsistent/_pset.py | 227 + .../core/_vendor/pyrsistent/_pvector.py | 711 
++ src/poetry/core/_vendor/pyrsistent/_toolz.py | 83 + .../_vendor/pyrsistent/_transformations.py | 139 + src/poetry/core/_vendor/pyrsistent/py.typed | 0 src/poetry/core/_vendor/pyrsistent/typing.py | 80 + src/poetry/core/_vendor/tomlkit/LICENSE | 20 + src/poetry/core/_vendor/tomlkit/__init__.py | 55 + src/poetry/core/_vendor/tomlkit/_compat.py | 22 + src/poetry/core/_vendor/tomlkit/_utils.py | 155 + src/poetry/core/_vendor/tomlkit/api.py | 287 + src/poetry/core/_vendor/tomlkit/container.py | 907 +++ src/poetry/core/_vendor/tomlkit/exceptions.py | 227 + src/poetry/core/_vendor/tomlkit/items.py | 1950 ++++++ src/poetry/core/_vendor/tomlkit/parser.py | 1138 ++++ src/poetry/core/_vendor/tomlkit/py.typed | 0 src/poetry/core/_vendor/tomlkit/source.py | 181 + src/poetry/core/_vendor/tomlkit/toml_char.py | 52 + .../core/_vendor/tomlkit/toml_document.py | 7 + src/poetry/core/_vendor/tomlkit/toml_file.py | 58 + .../core/_vendor/typing_extensions.LICENSE | 254 + src/poetry/core/_vendor/typing_extensions.py | 2209 +++++++ src/poetry/core/_vendor/vendor.txt | 9 + src/poetry/core/constraints/__init__.py | 0 .../core/constraints/generic/__init__.py | 20 + .../constraints/generic/any_constraint.py | 42 + .../constraints/generic/base_constraint.py | 39 + .../core/constraints/generic/constraint.py | 137 + .../constraints/generic/empty_constraint.py | 40 + .../constraints/generic/multi_constraint.py | 96 + src/poetry/core/constraints/generic/parser.py | 65 + .../constraints/generic/union_constraint.py | 146 + .../core/constraints/version/__init__.py | 24 + .../constraints/version/empty_constraint.py | 56 + .../core/constraints/version/exceptions.py | 5 + src/poetry/core/constraints/version/parser.py | 167 + .../core/constraints/version/patterns.py | 28 + src/poetry/core/constraints/version/util.py | 58 + .../core/constraints/version/version.py | 181 + .../constraints/version/version_constraint.py | 65 + .../core/constraints/version/version_range.py | 426 ++ .../version/version_range_constraint.py | 93 + .../core/constraints/version/version_union.py | 422 ++ src/poetry/core/exceptions/__init__.py | 6 + src/poetry/core/exceptions/base.py | 5 + src/poetry/core/factory.py | 455 ++ src/poetry/core/json/__init__.py | 40 + .../core/json/schemas/poetry-schema.json | 655 ++ src/poetry/core/masonry/__init__.py | 8 + src/poetry/core/masonry/api.py | 83 + src/poetry/core/masonry/builder.py | 33 + src/poetry/core/masonry/builders/__init__.py | 0 src/poetry/core/masonry/builders/builder.py | 398 ++ src/poetry/core/masonry/builders/sdist.py | 423 ++ src/poetry/core/masonry/builders/wheel.py | 428 ++ src/poetry/core/masonry/metadata.py | 97 + src/poetry/core/masonry/utils/__init__.py | 0 src/poetry/core/masonry/utils/helpers.py | 83 + src/poetry/core/masonry/utils/include.py | 51 + src/poetry/core/masonry/utils/module.py | 115 + .../core/masonry/utils/package_include.py | 93 + src/poetry/core/packages/__init__.py | 0 .../core/packages/constraints/__init__.py | 32 + src/poetry/core/packages/dependency.py | 530 ++ src/poetry/core/packages/dependency_group.py | 57 + .../core/packages/directory_dependency.py | 95 + src/poetry/core/packages/file_dependency.py | 84 + src/poetry/core/packages/package.py | 645 ++ src/poetry/core/packages/project_package.py | 90 + src/poetry/core/packages/specification.py | 202 + src/poetry/core/packages/url_dependency.py | 63 + src/poetry/core/packages/utils/__init__.py | 0 src/poetry/core/packages/utils/link.py | 232 + src/poetry/core/packages/utils/utils.py | 391 ++ 
src/poetry/core/packages/vcs_dependency.py | 125 + src/poetry/core/poetry.py | 45 + src/poetry/core/py.typed | 0 src/poetry/core/pyproject/__init__.py | 0 src/poetry/core/pyproject/exceptions.py | 7 + src/poetry/core/pyproject/tables.py | 55 + src/poetry/core/pyproject/toml.py | 114 + src/poetry/core/semver/__init__.py | 10 + src/poetry/core/semver/empty_constraint.py | 6 + src/poetry/core/semver/exceptions.py | 6 + src/poetry/core/semver/helpers.py | 7 + src/poetry/core/semver/patterns.py | 18 + src/poetry/core/semver/util.py | 6 + src/poetry/core/semver/version.py | 6 + src/poetry/core/semver/version_constraint.py | 6 + src/poetry/core/semver/version_range.py | 6 + .../core/semver/version_range_constraint.py | 6 + src/poetry/core/semver/version_union.py | 6 + src/poetry/core/spdx/__init__.py | 0 src/poetry/core/spdx/data/licenses.json | 1847 ++++++ src/poetry/core/spdx/helpers.py | 50 + src/poetry/core/spdx/license.py | 162 + src/poetry/core/spdx/updater.py | 39 + src/poetry/core/toml/__init__.py | 7 + src/poetry/core/toml/exceptions.py | 9 + src/poetry/core/toml/file.py | 42 + src/poetry/core/utils/__init__.py | 0 src/poetry/core/utils/_compat.py | 6 + src/poetry/core/utils/helpers.py | 107 + src/poetry/core/utils/patterns.py | 11 + src/poetry/core/utils/toml_file.py | 20 + src/poetry/core/vcs/__init__.py | 35 + src/poetry/core/vcs/git.py | 384 ++ src/poetry/core/version/__init__.py | 0 src/poetry/core/version/exceptions.py | 5 + src/poetry/core/version/grammars/__init__.py | 10 + src/poetry/core/version/grammars/markers.lark | 36 + src/poetry/core/version/grammars/pep508.lark | 29 + src/poetry/core/version/helpers.py | 65 + src/poetry/core/version/markers.py | 926 +++ src/poetry/core/version/parser.py | 31 + src/poetry/core/version/pep440/__init__.py | 9 + src/poetry/core/version/pep440/parser.py | 83 + src/poetry/core/version/pep440/segments.py | 146 + src/poetry/core/version/pep440/version.py | 317 + src/poetry/core/version/requirements.py | 108 + tests/__init__.py | 0 tests/conftest.py | 110 + tests/constraints/__init__.py | 0 tests/constraints/generic/__init__.py | 0 tests/constraints/generic/test_constraint.py | 185 + tests/constraints/generic/test_main.py | 59 + .../generic/test_multi_constraint.py | 43 + .../generic/test_union_constraint.py | 32 + tests/constraints/version/__init__.py | 0 tests/constraints/version/test_helpers.py | 452 ++ .../version/test_parse_constraint.py | 256 + tests/constraints/version/test_utils.py | 83 + tests/constraints/version/test_version.py | 529 ++ .../version/test_version_constraint.py | 32 + .../constraints/version/test_version_range.py | 460 ++ tests/fixtures/complete.toml | 47 + .../demo-0.1.0-py2.py3-none-any.whl | Bin 0 -> 1116 bytes .../fixtures/distributions/demo-0.1.0.tar.gz | Bin 0 -> 961 bytes .../fixtures/invalid_pyproject/pyproject.toml | 11 + tests/fixtures/pep_517_backend/README.md | 2 + .../fixtures/pep_517_backend/foo/__init__.py | 0 tests/fixtures/pep_517_backend/pyproject.toml | 37 + .../pyproject.toml | 12 + .../pyproject.toml | 22 + .../README.rst | 2 + .../pyproject.toml | 17 + .../simple_project/__init__.py | 0 .../README.rst | 2 + .../pyproject.toml | 20 + .../simple_project/__init__.py | 0 .../pyproject.toml | 13 + .../project/__init__.py | 0 .../pyproject.toml | 20 + .../project/__init__.py | 0 .../pyproject.toml | 19 + .../pyproject.toml | 13 + .../project_with_setup/my_package/__init__.py | 0 tests/fixtures/project_with_setup/setup.py | 14 + .../project_with_setup_cfg_only/setup.cfg | 18 + 
tests/fixtures/sample_project/README.rst | 2 + tests/fixtures/sample_project/pyproject.toml | 71 + tests/fixtures/script-files/sample_script.py | 6 + tests/fixtures/script-files/sample_script.sh | 3 + tests/fixtures/simple_project/README.rst | 2 + .../simple_project-1.2.3-py2.py3-none-any.whl | Bin 0 -> 1320 bytes .../dist/simple_project-1.2.3.tar.gz | Bin 0 -> 1106 bytes tests/fixtures/simple_project/pyproject.toml | 25 + .../simple_project/simple_project/__init__.py | 0 tests/fixtures/with_readme_files/README-1.rst | 2 + tests/fixtures/with_readme_files/README-2.rst | 2 + .../fixtures/with_readme_files/pyproject.toml | 19 + .../with_readme_files/single_python.py | 6 + tests/integration/__init__.py | 0 tests/integration/test_pep517.py | 98 + tests/integration/test_pep517_backend.py | 54 + tests/json/__init__.py | 0 tests/json/test_poetry_schema.py | 59 + tests/masonry/__init__.py | 0 tests/masonry/builders/__init__.py | 0 .../build_script_in_subdir/pyproject.toml | 14 + .../build_script_in_subdir/scripts/build.py | 0 .../build_script_in_subdir/src/foo.py | 0 .../case_sensitive_exclusions/LICENSE | 20 + .../case_sensitive_exclusions/README.rst | 2 + .../my_package/Foo/Bar.py | 0 .../my_package/Foo/IncludedBar.py | 0 .../my_package/Foo/SecondBar.py | 0 .../my_package/Foo/lowercasebar.py | 0 .../my_package/FooBar/Bar.py | 0 .../my_package/FooBar/lowercasebar.py | 0 .../my_package/__init__.py | 1 + .../my_package/bar/CapitalFoo.py | 0 .../my_package/bar/foo.py | 0 .../case_sensitive_exclusions/pyproject.toml | 49 + .../comma_file/comma_file/__init__.py | 0 .../fixtures/comma_file/comma_file/a,b.py | 0 .../fixtures/comma_file/pyproject.toml | 12 + .../builders/fixtures/complete/LICENSE | 20 + .../builders/fixtures/complete/README.rst | 2 + .../builders/fixtures/complete/bin/script.sh | 3 + .../fixtures/complete/my_package/__init__.py | 1 + .../complete/my_package/data1/test.json | 1 + .../complete/my_package/sub_pkg1/__init__.py | 0 .../my_package/sub_pkg1/extra_file.xml | 0 .../complete/my_package/sub_pkg2/__init__.py | 0 .../my_package/sub_pkg2/data2/data.json | 1 + .../complete/my_package/sub_pkg3/foo.py | 0 .../builders/fixtures/complete/pyproject.toml | 56 + .../default_src_with_excluded_data/LICENSE | 20 + .../default_src_with_excluded_data/README.rst | 2 + .../pyproject.toml | 39 + .../src/my_package/__init__.py | 0 .../src/my_package/data/data1.txt | 0 .../src/my_package/data/sub_data/data2.txt | 0 .../src/my_package/data/sub_data/data3.txt | 0 .../default_with_excluded_data/LICENSE | 20 + .../default_with_excluded_data/README.rst | 2 + .../my_package/__init__.py | 0 .../my_package/data/data1.txt | 0 .../my_package/data/sub_data/data2.txt | 0 .../my_package/data/sub_data/data3.txt | 0 .../default_with_excluded_data/pyproject.toml | 39 + .../default_with_excluded_data_toml/LICENSE | 20 + .../README.rst | 2 + .../my_package/__init__.py | 0 .../my_package/data/data1.txt | 0 .../my_package/data/sub_data/data2.txt | 0 .../my_package/data/sub_data/data3.txt | 0 .../pyproject.toml | 41 + .../fixtures/disable_setup_py/README.rst | 2 + .../disable_setup_py/my_package/__init__.py | 0 .../fixtures/disable_setup_py/pyproject.toml | 35 + .../builders/fixtures/epoch/README.rst | 2 + .../masonry/builders/fixtures/epoch/epoch.py | 3 + .../builders/fixtures/epoch/pyproject.toml | 12 + .../exclude_whl_include_sdist/__init__.py | 1 + .../compiled/source.c | 0 .../compiled/source.h | 0 .../exclude_whl_include_sdist/cython_code.pyx | 0 .../exclude-whl-include-sdist/pyproject.toml | 17 + 
.../fixtures/exclude_nested_data_toml/LICENSE | 20 + .../exclude_nested_data_toml/README.rst | 2 + .../my_package/__init__.py | 0 .../my_package/data/data1.txt | 0 .../my_package/data/data2.txt | 0 .../my_package/data/sub_data/data2.txt | 0 .../my_package/data/sub_data/data3.txt | 0 .../my_package/puplic/item1/itemdata1.txt | 0 .../puplic/item1/subitem/subitemdata.txt | 0 .../my_package/puplic/item2/itemdata2.txt | 0 .../my_package/puplic/publicdata.txt | 0 .../exclude_nested_data_toml/pyproject.toml | 42 + .../fixtures/excluded_subpackage/README.rst | 2 + .../excluded_subpackage/example/__init__.py | 1 + .../example/test/__init__.py | 0 .../example/test/excluded.py | 5 + .../excluded_subpackage/pyproject.toml | 18 + .../builders/fixtures/extended/README.rst | 2 + .../builders/fixtures/extended/build.py | 8 + .../fixtures/extended/extended/__init__.py | 0 .../fixtures/extended/extended/extended.c | 58 + .../builders/fixtures/extended/pyproject.toml | 15 + .../builders/fixtures/extended/setup.py | 24 + .../extended_with_no_setup/README.rst | 2 + .../fixtures/extended_with_no_setup/build.py | 28 + .../extended/__init__.py | 0 .../extended/extended.c | 58 + .../extended_with_no_setup/pyproject.toml | 16 + .../lib/my_package/__init__.py | 0 .../lib/my_package/generated.py | 0 .../include_excluded_code/pyproject.toml | 20 + .../invalid_case_sensitive_exclusions/LICENSE | 20 + .../README.rst | 2 + .../my_package/Bar/foo/bar/Foo.py | 0 .../my_package/Foo/Bar.py | 0 .../my_package/Foo/IncludedBar.py | 0 .../my_package/Foo/SecondBar.py | 0 .../my_package/Foo/lowercasebar.py | 0 .../my_package/FooBar/Bar.py | 0 .../my_package/FooBar/lowercasebar.py | 0 .../my_package/__init__.py | 1 + .../pyproject.toml | 44 + .../fixtures/licenses_and_copying/COPYING | 0 .../fixtures/licenses_and_copying/COPYING.txt | 0 .../fixtures/licenses_and_copying/LICENSE | 20 + .../fixtures/licenses_and_copying/LICENSE.md | 0 .../licenses_and_copying/LICENSES/BSD-3.md | 0 .../LICENSES/CUSTOM-LICENSE | 0 .../licenses_and_copying/LICENSES/MIT.txt | 0 .../fixtures/licenses_and_copying/README.rst | 2 + .../my_package/__init__.py | 1 + .../licenses_and_copying/pyproject.toml | 49 + .../localversionlabel/localversionlabel.py | 1 + .../fixtures/localversionlabel/pyproject.toml | 5 + .../builders/fixtures/module1/README.rst | 2 + .../builders/fixtures/module1/module1.py | 3 + .../builders/fixtures/module1/pyproject.toml | 16 + .../pep_561_stub_only/pkg-stubs/__init__.pyi | 0 .../pep_561_stub_only/pkg-stubs/module.pyi | 4 + .../pkg-stubs/subpkg/__init__.pyi | 0 .../fixtures/pep_561_stub_only/pyproject.toml | 14 + .../pkg-stubs/__init__.pyi | 0 .../pkg-stubs/module.pyi | 4 + .../pkg-stubs/py.typed | 1 + .../pkg-stubs/subpkg/__init__.pyi | 0 .../pep_561_stub_only_partial/pyproject.toml | 14 + .../pkg-stubs/module.pyi | 4 + .../pkg-stubs/subpkg/__init__.pyi | 0 .../pkg-stubs/subpkg/py.typed | 1 + .../pyproject.toml | 14 + .../pep_561_stub_only_src/pyproject.toml | 14 + .../src/pkg-stubs/__init__.pyi | 0 .../src/pkg-stubs/module.pyi | 4 + .../src/pkg-stubs/subpkg/__init__.pyi | 0 .../builders/fixtures/prerelease/README.rst | 2 + .../fixtures/prerelease/prerelease.py | 3 + .../fixtures/prerelease/pyproject.toml | 12 + .../script_callable_legacy_string/README.rst | 2 + .../my_package/__init__.py | 1 + .../pyproject.toml | 19 + .../script_callable_legacy_table/README.rst | 2 + .../my_package/__init__.py | 1 + .../pyproject.toml | 21 + .../script_reference_console/README.rst | 2 + .../my_package/__init__.py | 1 + 
.../script_reference_console/pyproject.toml | 21 + .../fixtures/script_reference_file/README.rst | 2 + .../script_reference_file/bin/script.sh | 3 + .../my_package/__init__.py | 1 + .../script_reference_file/pyproject.toml | 19 + .../README.rst | 2 + .../bin/script.sh | 3 + .../my_package/__init__.py | 1 + .../pyproject.toml | 19 + .../script_reference_file_missing/README.rst | 2 + .../my_package/__init__.py | 1 + .../pyproject.toml | 19 + .../fixtures/simple_version/README.rst | 2 + .../fixtures/simple_version/pyproject.toml | 13 + .../fixtures/simple_version/simple_version.py | 3 + .../fixtures/single_python/README.rst | 2 + .../fixtures/single_python/pyproject.toml | 16 + .../fixtures/single_python/single_python.py | 3 + .../builders/fixtures/source_file/README.rst | 2 + .../fixtures/source_file/pyproject.toml | 16 + .../fixtures/source_file/src/module_src.py | 3 + .../fixtures/source_package/README.rst | 2 + .../fixtures/source_package/pyproject.toml | 15 + .../src/package_src/__init__.py | 0 .../source_package/src/package_src/module.py | 3 + .../split_source/lib_a/module_a/__init__.py | 0 .../split_source/lib_b/module_b/__init__.py | 0 .../fixtures/split_source/pyproject.toml | 15 + .../builders/fixtures/src_extended/README.rst | 2 + .../builders/fixtures/src_extended/build.py | 8 + .../fixtures/src_extended/pyproject.toml | 14 + .../builders/fixtures/src_extended/setup.py | 27 + .../src_extended/src/extended/__init__.py | 0 .../src_extended/src/extended/extended.c | 58 + .../builders/fixtures/with-include/LICENSE | 20 + .../builders/fixtures/with-include/README.rst | 2 + .../fixtures/with-include/extra_dir/README.md | 0 .../with-include/extra_dir/__init__.py | 0 .../extra_dir/sub_pkg/__init__.py | 0 .../extra_dir/sub_pkg/vcs_excluded.txt | 0 .../with-include/extra_dir/vcs_excluded.txt | 0 .../with-include/for_wheel_only/__init__.py | 0 .../fixtures/with-include/my_module.py | 0 .../builders/fixtures/with-include/notes.txt | 0 .../package_with_include/__init__.py | 1 + .../fixtures/with-include/pyproject.toml | 55 + .../with-include/src/src_package/__init__.py | 0 .../fixtures/with-include/tests/__init__.py | 0 .../fixtures/with_bad_path_dep/pyproject.toml | 9 + .../with_bad_path_dep/__init__.py | 0 .../with_bad_path_dev_dep/pyproject.toml | 11 + .../with_bad_path_dev_dep/__init__.py | 0 .../with_include_inline_table/both.txt | 0 .../with_include_inline_table/pyproject.toml | 48 + .../src/src_package/__init__.py | 0 .../tests/__init__.py | 0 .../tests/test_foo/test.py | 0 .../with_include_inline_table/wheel_only.txt | 0 .../with_url_dependency/pyproject.toml | 24 + .../with_url_dependency/__init__.py | 0 .../with_vcs_dependency/pyproject.toml | 24 + .../with_vcs_dependency/__init__.py | 0 .../my_package/__init__.py | 0 .../pyproject.toml | 11 + tests/masonry/builders/test_builder.py | 302 + tests/masonry/builders/test_complete.py | 629 ++ tests/masonry/builders/test_sdist.py | 670 ++ tests/masonry/builders/test_wheel.py | 352 + tests/masonry/test_api.py | 256 + tests/masonry/utils/__init__.py | 0 .../pep_561_stub_only/bad/__init__.pyi | 0 .../fixtures/pep_561_stub_only/bad/module.pyi | 4 + .../pep_561_stub_only/good-stubs/__init__.pyi | 0 .../pep_561_stub_only/good-stubs/module.pyi | 4 + .../good-stubs/module.pyi | 4 + .../good-stubs/subpkg/__init__.pyi | 0 .../good-stubs/subpkg/py.typed | 1 + .../utils/fixtures/with_includes/__init__.py | 0 .../utils/fixtures/with_includes/bar/baz.py | 0 .../extra_package/some_dir/foo.py | 0 .../extra_package/some_dir/quux.py | 0 
.../with_includes/not_a_python_pkg/baz.txt | 0 tests/masonry/utils/test_helpers.py | 23 + tests/masonry/utils/test_package_include.py | 85 + tests/packages/__init__.py | 0 tests/packages/test_dependency.py | 364 ++ tests/packages/test_dependency_group.py | 26 + tests/packages/test_directory_dependency.py | 143 + tests/packages/test_file_dependency.py | 234 + tests/packages/test_main.py | 331 + tests/packages/test_package.py | 681 ++ tests/packages/test_specification.py | 117 + tests/packages/test_url_dependency.py | 91 + tests/packages/test_vcs_dependency.py | 178 + tests/packages/utils/__init__.py | 0 tests/packages/utils/test_utils.py | 250 + tests/packages/utils/test_utils_link.py | 145 + tests/packages/utils/test_utils_urls.py | 64 + tests/pyproject/__init__.py | 0 tests/pyproject/conftest.py | 43 + tests/pyproject/test_pyproject_toml.py | 113 + tests/pyproject/test_pyproject_toml_file.py | 39 + tests/spdx/__init__.py | 0 tests/spdx/test_helpers.py | 54 + tests/spdx/test_license.py | 59 + tests/test_core_version.py | 11 + tests/test_factory.py | 411 ++ tests/testutils.py | 82 + tests/utils/__init__.py | 0 tests/utils/test_helpers.py | 120 + tests/vcs/__init__.py | 0 tests/vcs/test_vcs.py | 475 ++ tests/version/__init__.py | 0 tests/version/pep440/__init__.py | 0 tests/version/pep440/test_segments.py | 110 + tests/version/pep440/test_version.py | 343 + tests/version/test_markers.py | 1468 +++++ tests/version/test_requirements.py | 134 + tox.ini | 22 + vendors/patches/jsonschema.patch | 36 + vendors/poetry.lock | 219 + vendors/pyproject.toml | 32 + 591 files changed, 75001 insertions(+) create mode 100644 .flake8 create mode 100644 .gitattributes create mode 100644 .github/ISSUE_TEMPLATE/config.yml create mode 100644 .github/PULL_REQUEST_TEMPLATE.md create mode 100644 .github/workflows/downstream.yml create mode 100644 .github/workflows/integration.yml create mode 100644 .github/workflows/release.yml create mode 100644 .github/workflows/tests.yml create mode 100644 .gitignore create mode 100644 .pre-commit-config.yaml create mode 100644 CHANGELOG.md create mode 100644 LICENSE create mode 100644 Makefile create mode 100644 README.md create mode 100644 poetry.lock create mode 100644 pyproject.toml create mode 100644 src/poetry/core/__init__.py create mode 100644 src/poetry/core/_vendor/_pyrsistent_version.py create mode 100644 src/poetry/core/_vendor/attr/__init__.py create mode 100644 src/poetry/core/_vendor/attr/_cmp.py create mode 100644 src/poetry/core/_vendor/attr/_compat.py create mode 100644 src/poetry/core/_vendor/attr/_config.py create mode 100644 src/poetry/core/_vendor/attr/_funcs.py create mode 100644 src/poetry/core/_vendor/attr/_make.py create mode 100644 src/poetry/core/_vendor/attr/_next_gen.py create mode 100644 src/poetry/core/_vendor/attr/_version_info.py create mode 100644 src/poetry/core/_vendor/attr/converters.py create mode 100644 src/poetry/core/_vendor/attr/exceptions.py create mode 100644 src/poetry/core/_vendor/attr/filters.py create mode 100644 src/poetry/core/_vendor/attr/py.typed create mode 100644 src/poetry/core/_vendor/attr/setters.py create mode 100644 src/poetry/core/_vendor/attr/validators.py create mode 100644 src/poetry/core/_vendor/attrs/LICENSE create mode 100644 src/poetry/core/_vendor/attrs/__init__.py create mode 100644 src/poetry/core/_vendor/attrs/converters.py create mode 100644 src/poetry/core/_vendor/attrs/exceptions.py create mode 100644 src/poetry/core/_vendor/attrs/filters.py create mode 100644 src/poetry/core/_vendor/attrs/py.typed create 
mode 100644 src/poetry/core/_vendor/attrs/setters.py create mode 100644 src/poetry/core/_vendor/attrs/validators.py create mode 100644 src/poetry/core/_vendor/jsonschema/COPYING create mode 100644 src/poetry/core/_vendor/jsonschema/__init__.py create mode 100644 src/poetry/core/_vendor/jsonschema/__main__.py create mode 100644 src/poetry/core/_vendor/jsonschema/_format.py create mode 100644 src/poetry/core/_vendor/jsonschema/_legacy_validators.py create mode 100644 src/poetry/core/_vendor/jsonschema/_types.py create mode 100644 src/poetry/core/_vendor/jsonschema/_utils.py create mode 100644 src/poetry/core/_vendor/jsonschema/_validators.py create mode 100644 src/poetry/core/_vendor/jsonschema/benchmarks/__init__.py create mode 100644 src/poetry/core/_vendor/jsonschema/benchmarks/issue232.py create mode 100644 src/poetry/core/_vendor/jsonschema/benchmarks/issue232/issue.json create mode 100644 src/poetry/core/_vendor/jsonschema/benchmarks/json_schema_test_suite.py create mode 100644 src/poetry/core/_vendor/jsonschema/cli.py create mode 100644 src/poetry/core/_vendor/jsonschema/exceptions.py create mode 100644 src/poetry/core/_vendor/jsonschema/protocols.py create mode 100644 src/poetry/core/_vendor/jsonschema/schemas/draft2019-09.json create mode 100644 src/poetry/core/_vendor/jsonschema/schemas/draft2020-12.json create mode 100644 src/poetry/core/_vendor/jsonschema/schemas/draft3.json create mode 100644 src/poetry/core/_vendor/jsonschema/schemas/draft4.json create mode 100644 src/poetry/core/_vendor/jsonschema/schemas/draft6.json create mode 100644 src/poetry/core/_vendor/jsonschema/schemas/draft7.json create mode 100644 src/poetry/core/_vendor/jsonschema/schemas/vocabularies.json create mode 100644 src/poetry/core/_vendor/jsonschema/validators.py create mode 100644 src/poetry/core/_vendor/lark/LICENSE create mode 100644 src/poetry/core/_vendor/lark/__init__.py create mode 100644 src/poetry/core/_vendor/lark/__pyinstaller/__init__.py create mode 100644 src/poetry/core/_vendor/lark/__pyinstaller/hook-lark.py create mode 100644 src/poetry/core/_vendor/lark/ast_utils.py create mode 100644 src/poetry/core/_vendor/lark/common.py create mode 100644 src/poetry/core/_vendor/lark/exceptions.py create mode 100644 src/poetry/core/_vendor/lark/grammar.py create mode 100644 src/poetry/core/_vendor/lark/grammars/__init__.py create mode 100644 src/poetry/core/_vendor/lark/grammars/common.lark create mode 100644 src/poetry/core/_vendor/lark/grammars/lark.lark create mode 100644 src/poetry/core/_vendor/lark/grammars/python.lark create mode 100644 src/poetry/core/_vendor/lark/grammars/unicode.lark create mode 100644 src/poetry/core/_vendor/lark/indenter.py create mode 100644 src/poetry/core/_vendor/lark/lark.py create mode 100644 src/poetry/core/_vendor/lark/lexer.py create mode 100644 src/poetry/core/_vendor/lark/load_grammar.py create mode 100644 src/poetry/core/_vendor/lark/parse_tree_builder.py create mode 100644 src/poetry/core/_vendor/lark/parser_frontends.py create mode 100644 src/poetry/core/_vendor/lark/parsers/__init__.py create mode 100644 src/poetry/core/_vendor/lark/parsers/cyk.py create mode 100644 src/poetry/core/_vendor/lark/parsers/earley.py create mode 100644 src/poetry/core/_vendor/lark/parsers/earley_common.py create mode 100644 src/poetry/core/_vendor/lark/parsers/earley_forest.py create mode 100644 src/poetry/core/_vendor/lark/parsers/grammar_analysis.py create mode 100644 src/poetry/core/_vendor/lark/parsers/lalr_analysis.py create mode 100644 
src/poetry/core/_vendor/lark/parsers/lalr_interactive_parser.py create mode 100644 src/poetry/core/_vendor/lark/parsers/lalr_parser.py create mode 100644 src/poetry/core/_vendor/lark/parsers/resolve_ambig.py create mode 100644 src/poetry/core/_vendor/lark/parsers/xearley.py create mode 100644 src/poetry/core/_vendor/lark/py.typed create mode 100644 src/poetry/core/_vendor/lark/reconstruct.py create mode 100644 src/poetry/core/_vendor/lark/tools/__init__.py create mode 100644 src/poetry/core/_vendor/lark/tools/nearley.py create mode 100644 src/poetry/core/_vendor/lark/tools/serialize.py create mode 100644 src/poetry/core/_vendor/lark/tools/standalone.py create mode 100644 src/poetry/core/_vendor/lark/tree.py create mode 100644 src/poetry/core/_vendor/lark/tree_matcher.py create mode 100644 src/poetry/core/_vendor/lark/tree_templates.py create mode 100644 src/poetry/core/_vendor/lark/utils.py create mode 100644 src/poetry/core/_vendor/lark/visitors.py create mode 100644 src/poetry/core/_vendor/packaging/LICENSE create mode 100644 src/poetry/core/_vendor/packaging/LICENSE.APACHE create mode 100644 src/poetry/core/_vendor/packaging/LICENSE.BSD create mode 100644 src/poetry/core/_vendor/packaging/__about__.py create mode 100644 src/poetry/core/_vendor/packaging/__init__.py create mode 100644 src/poetry/core/_vendor/packaging/_manylinux.py create mode 100644 src/poetry/core/_vendor/packaging/_musllinux.py create mode 100644 src/poetry/core/_vendor/packaging/_structures.py create mode 100644 src/poetry/core/_vendor/packaging/markers.py create mode 100644 src/poetry/core/_vendor/packaging/py.typed create mode 100644 src/poetry/core/_vendor/packaging/requirements.py create mode 100644 src/poetry/core/_vendor/packaging/specifiers.py create mode 100644 src/poetry/core/_vendor/packaging/tags.py create mode 100644 src/poetry/core/_vendor/packaging/utils.py create mode 100644 src/poetry/core/_vendor/packaging/version.py create mode 100644 src/poetry/core/_vendor/pyparsing/LICENSE create mode 100644 src/poetry/core/_vendor/pyparsing/__init__.py create mode 100644 src/poetry/core/_vendor/pyparsing/actions.py create mode 100644 src/poetry/core/_vendor/pyparsing/common.py create mode 100644 src/poetry/core/_vendor/pyparsing/core.py create mode 100644 src/poetry/core/_vendor/pyparsing/diagram/__init__.py create mode 100644 src/poetry/core/_vendor/pyparsing/exceptions.py create mode 100644 src/poetry/core/_vendor/pyparsing/helpers.py create mode 100644 src/poetry/core/_vendor/pyparsing/py.typed create mode 100644 src/poetry/core/_vendor/pyparsing/results.py create mode 100644 src/poetry/core/_vendor/pyparsing/testing.py create mode 100644 src/poetry/core/_vendor/pyparsing/unicode.py create mode 100644 src/poetry/core/_vendor/pyparsing/util.py create mode 100644 src/poetry/core/_vendor/pyrsistent/LICENSE.mit create mode 100644 src/poetry/core/_vendor/pyrsistent/__init__.py create mode 100644 src/poetry/core/_vendor/pyrsistent/_checked_types.py create mode 100644 src/poetry/core/_vendor/pyrsistent/_field_common.py create mode 100644 src/poetry/core/_vendor/pyrsistent/_helpers.py create mode 100644 src/poetry/core/_vendor/pyrsistent/_immutable.py create mode 100644 src/poetry/core/_vendor/pyrsistent/_pbag.py create mode 100644 src/poetry/core/_vendor/pyrsistent/_pclass.py create mode 100644 src/poetry/core/_vendor/pyrsistent/_pdeque.py create mode 100644 src/poetry/core/_vendor/pyrsistent/_plist.py create mode 100644 src/poetry/core/_vendor/pyrsistent/_pmap.py create mode 100644 
src/poetry/core/_vendor/pyrsistent/_precord.py create mode 100644 src/poetry/core/_vendor/pyrsistent/_pset.py create mode 100644 src/poetry/core/_vendor/pyrsistent/_pvector.py create mode 100644 src/poetry/core/_vendor/pyrsistent/_toolz.py create mode 100644 src/poetry/core/_vendor/pyrsistent/_transformations.py create mode 100644 src/poetry/core/_vendor/pyrsistent/py.typed create mode 100644 src/poetry/core/_vendor/pyrsistent/typing.py create mode 100644 src/poetry/core/_vendor/tomlkit/LICENSE create mode 100644 src/poetry/core/_vendor/tomlkit/__init__.py create mode 100644 src/poetry/core/_vendor/tomlkit/_compat.py create mode 100644 src/poetry/core/_vendor/tomlkit/_utils.py create mode 100644 src/poetry/core/_vendor/tomlkit/api.py create mode 100644 src/poetry/core/_vendor/tomlkit/container.py create mode 100644 src/poetry/core/_vendor/tomlkit/exceptions.py create mode 100644 src/poetry/core/_vendor/tomlkit/items.py create mode 100644 src/poetry/core/_vendor/tomlkit/parser.py create mode 100644 src/poetry/core/_vendor/tomlkit/py.typed create mode 100644 src/poetry/core/_vendor/tomlkit/source.py create mode 100644 src/poetry/core/_vendor/tomlkit/toml_char.py create mode 100644 src/poetry/core/_vendor/tomlkit/toml_document.py create mode 100644 src/poetry/core/_vendor/tomlkit/toml_file.py create mode 100644 src/poetry/core/_vendor/typing_extensions.LICENSE create mode 100644 src/poetry/core/_vendor/typing_extensions.py create mode 100644 src/poetry/core/_vendor/vendor.txt create mode 100644 src/poetry/core/constraints/__init__.py create mode 100644 src/poetry/core/constraints/generic/__init__.py create mode 100644 src/poetry/core/constraints/generic/any_constraint.py create mode 100644 src/poetry/core/constraints/generic/base_constraint.py create mode 100644 src/poetry/core/constraints/generic/constraint.py create mode 100644 src/poetry/core/constraints/generic/empty_constraint.py create mode 100644 src/poetry/core/constraints/generic/multi_constraint.py create mode 100644 src/poetry/core/constraints/generic/parser.py create mode 100644 src/poetry/core/constraints/generic/union_constraint.py create mode 100644 src/poetry/core/constraints/version/__init__.py create mode 100644 src/poetry/core/constraints/version/empty_constraint.py create mode 100644 src/poetry/core/constraints/version/exceptions.py create mode 100644 src/poetry/core/constraints/version/parser.py create mode 100644 src/poetry/core/constraints/version/patterns.py create mode 100644 src/poetry/core/constraints/version/util.py create mode 100644 src/poetry/core/constraints/version/version.py create mode 100644 src/poetry/core/constraints/version/version_constraint.py create mode 100644 src/poetry/core/constraints/version/version_range.py create mode 100644 src/poetry/core/constraints/version/version_range_constraint.py create mode 100644 src/poetry/core/constraints/version/version_union.py create mode 100644 src/poetry/core/exceptions/__init__.py create mode 100644 src/poetry/core/exceptions/base.py create mode 100644 src/poetry/core/factory.py create mode 100644 src/poetry/core/json/__init__.py create mode 100644 src/poetry/core/json/schemas/poetry-schema.json create mode 100644 src/poetry/core/masonry/__init__.py create mode 100644 src/poetry/core/masonry/api.py create mode 100644 src/poetry/core/masonry/builder.py create mode 100644 src/poetry/core/masonry/builders/__init__.py create mode 100644 src/poetry/core/masonry/builders/builder.py create mode 100644 src/poetry/core/masonry/builders/sdist.py create mode 100644 
src/poetry/core/masonry/builders/wheel.py create mode 100644 src/poetry/core/masonry/metadata.py create mode 100644 src/poetry/core/masonry/utils/__init__.py create mode 100644 src/poetry/core/masonry/utils/helpers.py create mode 100644 src/poetry/core/masonry/utils/include.py create mode 100644 src/poetry/core/masonry/utils/module.py create mode 100644 src/poetry/core/masonry/utils/package_include.py create mode 100644 src/poetry/core/packages/__init__.py create mode 100644 src/poetry/core/packages/constraints/__init__.py create mode 100644 src/poetry/core/packages/dependency.py create mode 100644 src/poetry/core/packages/dependency_group.py create mode 100644 src/poetry/core/packages/directory_dependency.py create mode 100644 src/poetry/core/packages/file_dependency.py create mode 100644 src/poetry/core/packages/package.py create mode 100644 src/poetry/core/packages/project_package.py create mode 100644 src/poetry/core/packages/specification.py create mode 100644 src/poetry/core/packages/url_dependency.py create mode 100644 src/poetry/core/packages/utils/__init__.py create mode 100644 src/poetry/core/packages/utils/link.py create mode 100644 src/poetry/core/packages/utils/utils.py create mode 100644 src/poetry/core/packages/vcs_dependency.py create mode 100644 src/poetry/core/poetry.py create mode 100644 src/poetry/core/py.typed create mode 100644 src/poetry/core/pyproject/__init__.py create mode 100644 src/poetry/core/pyproject/exceptions.py create mode 100644 src/poetry/core/pyproject/tables.py create mode 100644 src/poetry/core/pyproject/toml.py create mode 100644 src/poetry/core/semver/__init__.py create mode 100644 src/poetry/core/semver/empty_constraint.py create mode 100644 src/poetry/core/semver/exceptions.py create mode 100644 src/poetry/core/semver/helpers.py create mode 100644 src/poetry/core/semver/patterns.py create mode 100644 src/poetry/core/semver/util.py create mode 100644 src/poetry/core/semver/version.py create mode 100644 src/poetry/core/semver/version_constraint.py create mode 100644 src/poetry/core/semver/version_range.py create mode 100644 src/poetry/core/semver/version_range_constraint.py create mode 100644 src/poetry/core/semver/version_union.py create mode 100644 src/poetry/core/spdx/__init__.py create mode 100644 src/poetry/core/spdx/data/licenses.json create mode 100644 src/poetry/core/spdx/helpers.py create mode 100644 src/poetry/core/spdx/license.py create mode 100644 src/poetry/core/spdx/updater.py create mode 100644 src/poetry/core/toml/__init__.py create mode 100644 src/poetry/core/toml/exceptions.py create mode 100644 src/poetry/core/toml/file.py create mode 100644 src/poetry/core/utils/__init__.py create mode 100644 src/poetry/core/utils/_compat.py create mode 100644 src/poetry/core/utils/helpers.py create mode 100644 src/poetry/core/utils/patterns.py create mode 100644 src/poetry/core/utils/toml_file.py create mode 100644 src/poetry/core/vcs/__init__.py create mode 100644 src/poetry/core/vcs/git.py create mode 100644 src/poetry/core/version/__init__.py create mode 100644 src/poetry/core/version/exceptions.py create mode 100644 src/poetry/core/version/grammars/__init__.py create mode 100644 src/poetry/core/version/grammars/markers.lark create mode 100644 src/poetry/core/version/grammars/pep508.lark create mode 100644 src/poetry/core/version/helpers.py create mode 100644 src/poetry/core/version/markers.py create mode 100644 src/poetry/core/version/parser.py create mode 100644 src/poetry/core/version/pep440/__init__.py create mode 100644 
src/poetry/core/version/pep440/parser.py create mode 100644 src/poetry/core/version/pep440/segments.py create mode 100644 src/poetry/core/version/pep440/version.py create mode 100644 src/poetry/core/version/requirements.py create mode 100644 tests/__init__.py create mode 100644 tests/conftest.py create mode 100644 tests/constraints/__init__.py create mode 100644 tests/constraints/generic/__init__.py create mode 100644 tests/constraints/generic/test_constraint.py create mode 100644 tests/constraints/generic/test_main.py create mode 100644 tests/constraints/generic/test_multi_constraint.py create mode 100644 tests/constraints/generic/test_union_constraint.py create mode 100644 tests/constraints/version/__init__.py create mode 100644 tests/constraints/version/test_helpers.py create mode 100644 tests/constraints/version/test_parse_constraint.py create mode 100644 tests/constraints/version/test_utils.py create mode 100644 tests/constraints/version/test_version.py create mode 100644 tests/constraints/version/test_version_constraint.py create mode 100644 tests/constraints/version/test_version_range.py create mode 100644 tests/fixtures/complete.toml create mode 100644 tests/fixtures/distributions/demo-0.1.0-py2.py3-none-any.whl create mode 100644 tests/fixtures/distributions/demo-0.1.0.tar.gz create mode 100644 tests/fixtures/invalid_pyproject/pyproject.toml create mode 100644 tests/fixtures/pep_517_backend/README.md create mode 100644 tests/fixtures/pep_517_backend/foo/__init__.py create mode 100644 tests/fixtures/pep_517_backend/pyproject.toml create mode 100644 tests/fixtures/project_failing_strict_validation/pyproject.toml create mode 100644 tests/fixtures/project_with_build_system_requires/pyproject.toml create mode 100644 tests/fixtures/project_with_groups_and_explicit_main/README.rst create mode 100644 tests/fixtures/project_with_groups_and_explicit_main/pyproject.toml create mode 100644 tests/fixtures/project_with_groups_and_explicit_main/simple_project/__init__.py create mode 100644 tests/fixtures/project_with_groups_and_legacy_dev/README.rst create mode 100644 tests/fixtures/project_with_groups_and_legacy_dev/pyproject.toml create mode 100644 tests/fixtures/project_with_groups_and_legacy_dev/simple_project/__init__.py create mode 100644 tests/fixtures/project_with_invalid_dev_deps/pyproject.toml create mode 100644 tests/fixtures/project_with_markers_and_extras/project/__init__.py create mode 100644 tests/fixtures/project_with_markers_and_extras/pyproject.toml create mode 100644 tests/fixtures/project_with_multi_constraints_dependency/project/__init__.py create mode 100644 tests/fixtures/project_with_multi_constraints_dependency/pyproject.toml create mode 100644 tests/fixtures/project_with_pep517_non_poetry/pyproject.toml create mode 100644 tests/fixtures/project_with_setup/my_package/__init__.py create mode 100644 tests/fixtures/project_with_setup/setup.py create mode 100644 tests/fixtures/project_with_setup_cfg_only/setup.cfg create mode 100644 tests/fixtures/sample_project/README.rst create mode 100644 tests/fixtures/sample_project/pyproject.toml create mode 100644 tests/fixtures/script-files/sample_script.py create mode 100644 tests/fixtures/script-files/sample_script.sh create mode 100644 tests/fixtures/simple_project/README.rst create mode 100644 tests/fixtures/simple_project/dist/simple_project-1.2.3-py2.py3-none-any.whl create mode 100644 tests/fixtures/simple_project/dist/simple_project-1.2.3.tar.gz create mode 100644 tests/fixtures/simple_project/pyproject.toml create mode 
100644 tests/fixtures/simple_project/simple_project/__init__.py create mode 100644 tests/fixtures/with_readme_files/README-1.rst create mode 100644 tests/fixtures/with_readme_files/README-2.rst create mode 100644 tests/fixtures/with_readme_files/pyproject.toml create mode 100644 tests/fixtures/with_readme_files/single_python.py create mode 100644 tests/integration/__init__.py create mode 100644 tests/integration/test_pep517.py create mode 100644 tests/integration/test_pep517_backend.py create mode 100644 tests/json/__init__.py create mode 100644 tests/json/test_poetry_schema.py create mode 100644 tests/masonry/__init__.py create mode 100644 tests/masonry/builders/__init__.py create mode 100644 tests/masonry/builders/fixtures/build_script_in_subdir/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/build_script_in_subdir/scripts/build.py create mode 100644 tests/masonry/builders/fixtures/build_script_in_subdir/src/foo.py create mode 100644 tests/masonry/builders/fixtures/case_sensitive_exclusions/LICENSE create mode 100644 tests/masonry/builders/fixtures/case_sensitive_exclusions/README.rst create mode 100644 tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/Foo/Bar.py create mode 100644 tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/Foo/IncludedBar.py create mode 100644 tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/Foo/SecondBar.py create mode 100644 tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/Foo/lowercasebar.py create mode 100644 tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/FooBar/Bar.py create mode 100644 tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/FooBar/lowercasebar.py create mode 100644 tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/bar/CapitalFoo.py create mode 100644 tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/bar/foo.py create mode 100644 tests/masonry/builders/fixtures/case_sensitive_exclusions/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/comma_file/comma_file/__init__.py create mode 100644 tests/masonry/builders/fixtures/comma_file/comma_file/a,b.py create mode 100644 tests/masonry/builders/fixtures/comma_file/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/complete/LICENSE create mode 100644 tests/masonry/builders/fixtures/complete/README.rst create mode 100644 tests/masonry/builders/fixtures/complete/bin/script.sh create mode 100644 tests/masonry/builders/fixtures/complete/my_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/complete/my_package/data1/test.json create mode 100644 tests/masonry/builders/fixtures/complete/my_package/sub_pkg1/__init__.py create mode 100644 tests/masonry/builders/fixtures/complete/my_package/sub_pkg1/extra_file.xml create mode 100644 tests/masonry/builders/fixtures/complete/my_package/sub_pkg2/__init__.py create mode 100644 tests/masonry/builders/fixtures/complete/my_package/sub_pkg2/data2/data.json create mode 100644 tests/masonry/builders/fixtures/complete/my_package/sub_pkg3/foo.py create mode 100644 tests/masonry/builders/fixtures/complete/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/default_src_with_excluded_data/LICENSE create mode 100644 tests/masonry/builders/fixtures/default_src_with_excluded_data/README.rst create mode 100644 
tests/masonry/builders/fixtures/default_src_with_excluded_data/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/data/data1.txt create mode 100644 tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/data/sub_data/data2.txt create mode 100644 tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/data/sub_data/data3.txt create mode 100644 tests/masonry/builders/fixtures/default_with_excluded_data/LICENSE create mode 100644 tests/masonry/builders/fixtures/default_with_excluded_data/README.rst create mode 100644 tests/masonry/builders/fixtures/default_with_excluded_data/my_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/default_with_excluded_data/my_package/data/data1.txt create mode 100644 tests/masonry/builders/fixtures/default_with_excluded_data/my_package/data/sub_data/data2.txt create mode 100644 tests/masonry/builders/fixtures/default_with_excluded_data/my_package/data/sub_data/data3.txt create mode 100644 tests/masonry/builders/fixtures/default_with_excluded_data/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/default_with_excluded_data_toml/LICENSE create mode 100644 tests/masonry/builders/fixtures/default_with_excluded_data_toml/README.rst create mode 100644 tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/data/data1.txt create mode 100644 tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/data/sub_data/data2.txt create mode 100644 tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/data/sub_data/data3.txt create mode 100644 tests/masonry/builders/fixtures/default_with_excluded_data_toml/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/disable_setup_py/README.rst create mode 100644 tests/masonry/builders/fixtures/disable_setup_py/my_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/disable_setup_py/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/epoch/README.rst create mode 100644 tests/masonry/builders/fixtures/epoch/epoch.py create mode 100644 tests/masonry/builders/fixtures/epoch/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdist/__init__.py create mode 100644 tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdist/compiled/source.c create mode 100644 tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdist/compiled/source.h create mode 100644 tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdist/cython_code.pyx create mode 100644 tests/masonry/builders/fixtures/exclude-whl-include-sdist/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/exclude_nested_data_toml/LICENSE create mode 100644 tests/masonry/builders/fixtures/exclude_nested_data_toml/README.rst create mode 100644 tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/data1.txt create mode 100644 tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/data2.txt create mode 100644 
tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/sub_data/data2.txt create mode 100644 tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/sub_data/data3.txt create mode 100644 tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/item1/itemdata1.txt create mode 100644 tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/item1/subitem/subitemdata.txt create mode 100644 tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/item2/itemdata2.txt create mode 100644 tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/publicdata.txt create mode 100644 tests/masonry/builders/fixtures/exclude_nested_data_toml/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/excluded_subpackage/README.rst create mode 100644 tests/masonry/builders/fixtures/excluded_subpackage/example/__init__.py create mode 100644 tests/masonry/builders/fixtures/excluded_subpackage/example/test/__init__.py create mode 100644 tests/masonry/builders/fixtures/excluded_subpackage/example/test/excluded.py create mode 100644 tests/masonry/builders/fixtures/excluded_subpackage/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/extended/README.rst create mode 100644 tests/masonry/builders/fixtures/extended/build.py create mode 100644 tests/masonry/builders/fixtures/extended/extended/__init__.py create mode 100644 tests/masonry/builders/fixtures/extended/extended/extended.c create mode 100644 tests/masonry/builders/fixtures/extended/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/extended/setup.py create mode 100644 tests/masonry/builders/fixtures/extended_with_no_setup/README.rst create mode 100644 tests/masonry/builders/fixtures/extended_with_no_setup/build.py create mode 100644 tests/masonry/builders/fixtures/extended_with_no_setup/extended/__init__.py create mode 100644 tests/masonry/builders/fixtures/extended_with_no_setup/extended/extended.c create mode 100644 tests/masonry/builders/fixtures/extended_with_no_setup/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/include_excluded_code/lib/my_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/include_excluded_code/lib/my_package/generated.py create mode 100644 tests/masonry/builders/fixtures/include_excluded_code/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/LICENSE create mode 100644 tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/README.rst create mode 100644 tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Bar/foo/bar/Foo.py create mode 100644 tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Foo/Bar.py create mode 100644 tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Foo/IncludedBar.py create mode 100644 tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Foo/SecondBar.py create mode 100644 tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Foo/lowercasebar.py create mode 100644 tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/FooBar/Bar.py create mode 100644 tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/FooBar/lowercasebar.py create mode 100644 tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/__init__.py create mode 100644 
tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/licenses_and_copying/COPYING create mode 100644 tests/masonry/builders/fixtures/licenses_and_copying/COPYING.txt create mode 100644 tests/masonry/builders/fixtures/licenses_and_copying/LICENSE create mode 100644 tests/masonry/builders/fixtures/licenses_and_copying/LICENSE.md create mode 100644 tests/masonry/builders/fixtures/licenses_and_copying/LICENSES/BSD-3.md create mode 100644 tests/masonry/builders/fixtures/licenses_and_copying/LICENSES/CUSTOM-LICENSE create mode 100644 tests/masonry/builders/fixtures/licenses_and_copying/LICENSES/MIT.txt create mode 100644 tests/masonry/builders/fixtures/licenses_and_copying/README.rst create mode 100644 tests/masonry/builders/fixtures/licenses_and_copying/my_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/licenses_and_copying/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/localversionlabel/localversionlabel.py create mode 100644 tests/masonry/builders/fixtures/localversionlabel/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/module1/README.rst create mode 100644 tests/masonry/builders/fixtures/module1/module1.py create mode 100644 tests/masonry/builders/fixtures/module1/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only/pkg-stubs/__init__.pyi create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only/pkg-stubs/module.pyi create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only/pkg-stubs/subpkg/__init__.pyi create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/__init__.pyi create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/module.pyi create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/py.typed create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/subpkg/__init__.pyi create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only_partial/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pkg-stubs/module.pyi create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pkg-stubs/subpkg/__init__.pyi create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pkg-stubs/subpkg/py.typed create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only_src/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only_src/src/pkg-stubs/__init__.pyi create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only_src/src/pkg-stubs/module.pyi create mode 100644 tests/masonry/builders/fixtures/pep_561_stub_only_src/src/pkg-stubs/subpkg/__init__.pyi create mode 100644 tests/masonry/builders/fixtures/prerelease/README.rst create mode 100644 tests/masonry/builders/fixtures/prerelease/prerelease.py create mode 100644 tests/masonry/builders/fixtures/prerelease/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/script_callable_legacy_string/README.rst create mode 100644 tests/masonry/builders/fixtures/script_callable_legacy_string/my_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/script_callable_legacy_string/pyproject.toml create 
mode 100644 tests/masonry/builders/fixtures/script_callable_legacy_table/README.rst create mode 100644 tests/masonry/builders/fixtures/script_callable_legacy_table/my_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/script_callable_legacy_table/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/script_reference_console/README.rst create mode 100644 tests/masonry/builders/fixtures/script_reference_console/my_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/script_reference_console/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/script_reference_file/README.rst create mode 100644 tests/masonry/builders/fixtures/script_reference_file/bin/script.sh create mode 100644 tests/masonry/builders/fixtures/script_reference_file/my_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/script_reference_file/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/script_reference_file_invalid_definition/README.rst create mode 100644 tests/masonry/builders/fixtures/script_reference_file_invalid_definition/bin/script.sh create mode 100644 tests/masonry/builders/fixtures/script_reference_file_invalid_definition/my_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/script_reference_file_invalid_definition/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/script_reference_file_missing/README.rst create mode 100644 tests/masonry/builders/fixtures/script_reference_file_missing/my_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/script_reference_file_missing/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/simple_version/README.rst create mode 100644 tests/masonry/builders/fixtures/simple_version/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/simple_version/simple_version.py create mode 100644 tests/masonry/builders/fixtures/single_python/README.rst create mode 100644 tests/masonry/builders/fixtures/single_python/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/single_python/single_python.py create mode 100644 tests/masonry/builders/fixtures/source_file/README.rst create mode 100644 tests/masonry/builders/fixtures/source_file/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/source_file/src/module_src.py create mode 100644 tests/masonry/builders/fixtures/source_package/README.rst create mode 100644 tests/masonry/builders/fixtures/source_package/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/source_package/src/package_src/__init__.py create mode 100644 tests/masonry/builders/fixtures/source_package/src/package_src/module.py create mode 100644 tests/masonry/builders/fixtures/split_source/lib_a/module_a/__init__.py create mode 100644 tests/masonry/builders/fixtures/split_source/lib_b/module_b/__init__.py create mode 100644 tests/masonry/builders/fixtures/split_source/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/src_extended/README.rst create mode 100644 tests/masonry/builders/fixtures/src_extended/build.py create mode 100644 tests/masonry/builders/fixtures/src_extended/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/src_extended/setup.py create mode 100644 tests/masonry/builders/fixtures/src_extended/src/extended/__init__.py create mode 100644 tests/masonry/builders/fixtures/src_extended/src/extended/extended.c create mode 100644 tests/masonry/builders/fixtures/with-include/LICENSE create mode 100644 
tests/masonry/builders/fixtures/with-include/README.rst create mode 100644 tests/masonry/builders/fixtures/with-include/extra_dir/README.md create mode 100644 tests/masonry/builders/fixtures/with-include/extra_dir/__init__.py create mode 100644 tests/masonry/builders/fixtures/with-include/extra_dir/sub_pkg/__init__.py create mode 100644 tests/masonry/builders/fixtures/with-include/extra_dir/sub_pkg/vcs_excluded.txt create mode 100644 tests/masonry/builders/fixtures/with-include/extra_dir/vcs_excluded.txt create mode 100644 tests/masonry/builders/fixtures/with-include/for_wheel_only/__init__.py create mode 100644 tests/masonry/builders/fixtures/with-include/my_module.py create mode 100644 tests/masonry/builders/fixtures/with-include/notes.txt create mode 100644 tests/masonry/builders/fixtures/with-include/package_with_include/__init__.py create mode 100644 tests/masonry/builders/fixtures/with-include/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/with-include/src/src_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/with-include/tests/__init__.py create mode 100644 tests/masonry/builders/fixtures/with_bad_path_dep/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/with_bad_path_dep/with_bad_path_dep/__init__.py create mode 100644 tests/masonry/builders/fixtures/with_bad_path_dev_dep/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/with_bad_path_dev_dep/with_bad_path_dev_dep/__init__.py create mode 100644 tests/masonry/builders/fixtures/with_include_inline_table/both.txt create mode 100644 tests/masonry/builders/fixtures/with_include_inline_table/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/with_include_inline_table/src/src_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/with_include_inline_table/tests/__init__.py create mode 100644 tests/masonry/builders/fixtures/with_include_inline_table/tests/test_foo/test.py create mode 100644 tests/masonry/builders/fixtures/with_include_inline_table/wheel_only.txt create mode 100644 tests/masonry/builders/fixtures/with_url_dependency/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/with_url_dependency/with_url_dependency/__init__.py create mode 100644 tests/masonry/builders/fixtures/with_vcs_dependency/pyproject.toml create mode 100644 tests/masonry/builders/fixtures/with_vcs_dependency/with_vcs_dependency/__init__.py create mode 100644 tests/masonry/builders/fixtures/with_wildcard_dependency_constraint/my_package/__init__.py create mode 100644 tests/masonry/builders/fixtures/with_wildcard_dependency_constraint/pyproject.toml create mode 100644 tests/masonry/builders/test_builder.py create mode 100644 tests/masonry/builders/test_complete.py create mode 100644 tests/masonry/builders/test_sdist.py create mode 100644 tests/masonry/builders/test_wheel.py create mode 100644 tests/masonry/test_api.py create mode 100644 tests/masonry/utils/__init__.py create mode 100644 tests/masonry/utils/fixtures/pep_561_stub_only/bad/__init__.pyi create mode 100644 tests/masonry/utils/fixtures/pep_561_stub_only/bad/module.pyi create mode 100644 tests/masonry/utils/fixtures/pep_561_stub_only/good-stubs/__init__.pyi create mode 100644 tests/masonry/utils/fixtures/pep_561_stub_only/good-stubs/module.pyi create mode 100644 tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/good-stubs/module.pyi create mode 100644 tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/good-stubs/subpkg/__init__.pyi create mode 100644 
tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/good-stubs/subpkg/py.typed create mode 100644 tests/masonry/utils/fixtures/with_includes/__init__.py create mode 100644 tests/masonry/utils/fixtures/with_includes/bar/baz.py create mode 100644 tests/masonry/utils/fixtures/with_includes/extra_package/some_dir/foo.py create mode 100644 tests/masonry/utils/fixtures/with_includes/extra_package/some_dir/quux.py create mode 100644 tests/masonry/utils/fixtures/with_includes/not_a_python_pkg/baz.txt create mode 100644 tests/masonry/utils/test_helpers.py create mode 100644 tests/masonry/utils/test_package_include.py create mode 100644 tests/packages/__init__.py create mode 100644 tests/packages/test_dependency.py create mode 100644 tests/packages/test_dependency_group.py create mode 100644 tests/packages/test_directory_dependency.py create mode 100644 tests/packages/test_file_dependency.py create mode 100644 tests/packages/test_main.py create mode 100644 tests/packages/test_package.py create mode 100644 tests/packages/test_specification.py create mode 100644 tests/packages/test_url_dependency.py create mode 100644 tests/packages/test_vcs_dependency.py create mode 100644 tests/packages/utils/__init__.py create mode 100644 tests/packages/utils/test_utils.py create mode 100644 tests/packages/utils/test_utils_link.py create mode 100644 tests/packages/utils/test_utils_urls.py create mode 100644 tests/pyproject/__init__.py create mode 100644 tests/pyproject/conftest.py create mode 100644 tests/pyproject/test_pyproject_toml.py create mode 100644 tests/pyproject/test_pyproject_toml_file.py create mode 100644 tests/spdx/__init__.py create mode 100644 tests/spdx/test_helpers.py create mode 100644 tests/spdx/test_license.py create mode 100644 tests/test_core_version.py create mode 100644 tests/test_factory.py create mode 100644 tests/testutils.py create mode 100644 tests/utils/__init__.py create mode 100644 tests/utils/test_helpers.py create mode 100644 tests/vcs/__init__.py create mode 100644 tests/vcs/test_vcs.py create mode 100644 tests/version/__init__.py create mode 100644 tests/version/pep440/__init__.py create mode 100644 tests/version/pep440/test_segments.py create mode 100644 tests/version/pep440/test_version.py create mode 100644 tests/version/test_markers.py create mode 100644 tests/version/test_requirements.py create mode 100644 tox.ini create mode 100644 vendors/patches/jsonschema.patch create mode 100644 vendors/poetry.lock create mode 100644 vendors/pyproject.toml diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..c20f845 --- /dev/null +++ b/.flake8 @@ -0,0 +1,28 @@ +[flake8] +min_python_version = 3.7.0 +max-line-length = 88 +ban-relative-imports = True +# flake8-use-fstring: https://github.com/MichaelKim0407/flake8-use-fstring#--percent-greedy-and---format-greedy +format-greedy = 1 +inline-quotes = double +enable-extensions = TC, TC1 +type-checking-strict = true +eradicate-whitelist-extend = ^-.*; +extend-ignore = + # E203: Whitespace before ':' (pycqa/pycodestyle#373) + E203, + # E501: Line too long + E501, + # SIM106: Handle error-cases first + SIM106, + # ANN101: Missing type annotation for self in method + ANN101, + # ANN102: Missing type annotation for cls in classmethod + ANN102, +per-file-ignores = + tests/test_*:ANN201 + tests/**/test_*:ANN201 +extend-exclude = + src/poetry/core/_vendor/* + tests/fixtures/* + tests/**/fixtures/* diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000..c7b1818 --- /dev/null +++ b/.gitattributes @@ 
-0,0 +1,4 @@ +poetry.lock linguist-generated=true +vendors/poetry.lock linguist-generated=true +poetry/core/_vendor/** linguist-generated=true +poetry/core/_vendor/vendor.txt linguist-generated=false diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..2d33a8d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,12 @@ +# Ref: https://help.github.com/en/github/building-a-strong-community/configuring-issue-templates-for-your-repository#configuring-the-template-chooser +blank_issues_enabled: false +contact_links: +- name: '✏️ Poetry Issue Tracker' + url: https://github.com/python-poetry/poetry/issues/new/choose + about: | + Submit your issues to the Poetry issue tracker. Bug reports and feature requests + will be tracked there. +- name: '💬 Discord Server' + url: https://discordapp.com/invite/awxPgve + about: | + Chat with the community, ask questions and learn about best practices. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 0000000..2e95774 --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,13 @@ +Resolves: python-poetry# + + + +- [ ] Added **tests** for changed code. +- [ ] Updated **documentation** for changed code. + + diff --git a/.github/workflows/downstream.yml b/.github/workflows/downstream.yml new file mode 100644 index 0000000..293f9ac --- /dev/null +++ b/.github/workflows/downstream.yml @@ -0,0 +1,76 @@ +name: Poetry Downstream Tests + +on: + pull_request: {} + push: + branches: [main] + +jobs: + Tests: + name: ${{ matrix.ref }} + runs-on: ubuntu-latest + strategy: + matrix: + ref: ["master", "1.2"] + fail-fast: false + defaults: + run: + shell: bash + steps: + - uses: actions/checkout@v3 + with: + path: poetry-core + + - uses: actions/checkout@v3 + with: + path: poetry + repository: python-poetry/poetry + ref: ${{ matrix.ref }} + + - name: Set up Python 3.10 + uses: actions/setup-python@v3 + with: + python-version: "3.10" + + - name: Get full python version + id: full-python-version + run: echo ::set-output name=version::$(python -c "import sys; print('-'.join(str(v) for v in sys.version_info))") + + - name: Set up Poetry + run: | + pip install poetry + poetry config virtualenvs.in-project true + + - name: Set up cache + uses: actions/cache@v3 + id: cache + with: + path: ./poetry/.venv + key: venv-${{ steps.full-python-version.outputs.version }}-${{ hashFiles('**/poetry.lock') }} + + - name: Ensure cache is healthy + if: steps.cache.outputs.cache-hit == 'true' + working-directory: ./poetry + run: timeout 10s poetry run pip --version >/dev/null 2>&1 || rm -rf .venv + + - name: Switch downstream to development poetry-core + working-directory: ./poetry + run: | + # remove poetry-core from main group to avoid version conflicts + # with a potential entry in the test group + poetry remove poetry-core + # add to test group to overwrite a potential entry in that group + poetry add --lock --group test ../poetry-core + + - name: Install downstream dependencies + working-directory: ./poetry + run: | + # force update of directory dependency in cached venv + # (even if directory dependency with same version is already installed) + poetry run pip uninstall -y poetry-core + poetry install + + # TODO: mark run as success even when this fails and add comment to PR instead + - name: Run downstream test suite + working-directory: ./poetry + run: poetry run pytest diff --git a/.github/workflows/integration.yml b/.github/workflows/integration.yml 
new file mode 100644 index 0000000..2e50378 --- /dev/null +++ b/.github/workflows/integration.yml @@ -0,0 +1,32 @@ +name: Integration + +on: + pull_request: {} + push: + branches: [main] + +jobs: + Tests: + name: ${{ matrix.os }} / ${{ matrix.python-version }} + runs-on: "${{ matrix.os }}-latest" + strategy: + matrix: + os: [Ubuntu, MacOS, Windows] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] + fail-fast: false + defaults: + run: + shell: bash + steps: + - uses: actions/checkout@v3 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + + - name: Install tox + run: pip install --upgrade tox + + - name: Execute integration tests + run: tox -e integration diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000..578b6f1 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,52 @@ +name: Release + +on: + push: + tags: + - '*.*.*' + +jobs: + Release: + runs-on: ubuntu-latest + + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Get tag + id: tag + run: echo ::set-output name=tag::${GITHUB_REF#refs/tags/} + + - name: Set up Python 3.9 + uses: actions/setup-python@v3 + with: + python-version: "3.9" + + - name: Install Poetry + run: | + curl -sSL https://install.python-poetry.org | python - -y + + - name: Update PATH + run: echo "$HOME/.local/bin" >> $GITHUB_PATH + + - name: Build project for distribution + run: poetry build + + - name: Check Version + id: check-version + run: | + [[ "$(poetry version --short)" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]] \ + || echo ::set-output name=prerelease::true + + - name: Create Release + uses: ncipollo/release-action@v1 + with: + artifacts: "dist/*" + token: ${{ secrets.GITHUB_TOKEN }} + draft: false + prerelease: steps.check-version.outputs.prerelease == 'true' + + - name: Publish to PyPI + env: + POETRY_PYPI_TOKEN_PYPI: ${{ secrets.PYPI_TOKEN }} + run: poetry publish diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 0000000..030cc47 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,72 @@ +name: Tests + +on: + pull_request: {} + push: + branches: [main] + +jobs: + tests: + name: ${{ matrix.os }} / ${{ matrix.python-version }} + runs-on: "${{ matrix.os }}-latest" + strategy: + matrix: + os: [Ubuntu, MacOS, Windows] + python-version: ["3.7", "3.8", "3.9", "3.10", "3.11"] + include: + - os: Ubuntu + python-version: pypy-3.8 + fail-fast: false + defaults: + run: + shell: bash + steps: + - uses: actions/checkout@v3 + + - name: Set up Python ${{ matrix.python-version }} + uses: actions/setup-python@v3 + with: + python-version: ${{ matrix.python-version }} + + - name: Get full Python version + id: full-python-version + run: echo ::set-output name=version::$(python -c "import sys; print('-'.join(str(v) for v in sys.version_info))") + + - name: Bootstrap poetry + run: | + curl -sSL https://install.python-poetry.org | python - -y + + - name: Update PATH + if: ${{ matrix.os != 'Windows' }} + run: echo "$HOME/.local/bin" >> $GITHUB_PATH + + - name: Update Path for Windows + if: ${{ matrix.os == 'Windows' }} + run: echo "$APPDATA\Python\Scripts" >> $GITHUB_PATH + + - name: Configure poetry + run: poetry config virtualenvs.in-project true + + - name: Set up cache + uses: actions/cache@v3 + id: cache + with: + path: .venv + key: venv-${{ runner.os }}-${{ steps.full-python-version.outputs.version }}-${{ hashFiles('**/poetry.lock') }} + + - name: 
Ensure cache is healthy + if: steps.cache.outputs.cache-hit == 'true' + run: | + # `timeout` is not available on macOS, so we define a custom function. + [ "$(command -v timeout)" ] || function timeout() { perl -e 'alarm shift; exec @ARGV' "$@"; } + # Using `timeout` is a safeguard against the Poetry command hanging for some reason. + timeout 10s poetry run pip --version || rm -rf .venv + + - name: Install dependencies + run: poetry install + + - name: Run pytest + run: poetry run python -m pytest -p no:sugar -q tests/ + + - name: Run mypy + run: poetry run mypy diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..c2d2a72 --- /dev/null +++ b/.gitignore @@ -0,0 +1,41 @@ +*.pyc + +# Packages +*.egg +!/tests/**/*.egg +/*.egg-info +/tests/fixtures/**/*.egg-info +/dist/* +build +_build +.cache +*.so + +# Installer logs +pip-log.txt + +# Unit test / coverage reports +.coverage +.tox +.pytest_cache + +.DS_Store +.idea/* +.python-version +.vscode/* + +/test.py +/test_*.* + +/setup.cfg +MANIFEST.in +/setup.py +/docs/site/* +/tests/fixtures/simple_project/setup.py +/tests/fixtures/project_with_extras/setup.py +.mypy_cache + +.venv +/releases/* +pip-wheel-metadata +/poetry.toml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..371073d --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,87 @@ +exclude: | + (?x)( + ^tests/.*/fixtures/.* + | ^src/poetry/core/_vendor + ) + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.3.0 + hooks: + - id: trailing-whitespace + exclude: "vendors/patches/jsonschema.patch" + - id: end-of-file-fixer + - id: debug-statements + - id: check-merge-conflict + - id: check-case-conflict + - id: check-json + - id: check-toml + - id: check-yaml + - id: pretty-format-json + args: + - --autofix + - --no-ensure-ascii + - --no-sort-keys + - id: check-ast + - id: debug-statements + - id: check-docstring-first + + - repo: https://github.com/pre-commit/pygrep-hooks + rev: v1.9.0 + hooks: + - id: python-check-mock-methods + - id: python-use-type-annotations + - id: python-check-blanket-noqa + + - repo: https://github.com/asottile/yesqa + rev: v1.4.0 + hooks: + - id: yesqa + additional_dependencies: &flake8_deps + - flake8-annotations==2.9.0 + - flake8-broken-line==0.5.0 + - flake8-bugbear==22.7.1 + - flake8-comprehensions==3.10.0 + - flake8-eradicate==1.3.0 + - flake8-quotes==3.3.1 + - flake8-simplify==0.19.3 + - flake8-tidy-imports==4.8.0 + - flake8-type-checking==2.2.0 + - flake8-typing-imports==1.12.0 + - flake8-use-fstring==1.4 + - pep8-naming==0.13.1 + + - repo: https://github.com/asottile/pyupgrade + rev: v3.2.2 + hooks: + - id: pyupgrade + args: + - --py37-plus + + - repo: https://github.com/hadialqattan/pycln + rev: v2.1.2 + hooks: + - id: pycln + args: [--all] + + - repo: https://github.com/pycqa/isort + rev: 5.10.1 + hooks: + - id: isort + args: [--add-import, from __future__ import annotations] + exclude: | + (?x)( + ^.*/?setup\.py$ + | tests/.*\.pyi$ + ) + + - repo: https://github.com/psf/black + rev: 22.10.0 + hooks: + - id: black + + - repo: https://github.com/pycqa/flake8 + rev: 5.0.4 + hooks: + - id: flake8 + additional_dependencies: *flake8_deps diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..75e64ca --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,476 @@ +# Change Log + +## [1.4.0] - 2022-11-22 + +### Added + +- The PEP 517 `metadata_directory` is now respected as an input to the `build_wheel` hook 
([#487](https://github.com/python-poetry/poetry-core/pull/487)). + +### Changed + +- Sources are now considered more carefully when dealing with dependencies with environment markers ([#497](https://github.com/python-poetry/poetry-core/pull/497)). +- `EmptyConstraint` is now hashable ([#513](https://github.com/python-poetry/poetry-core/pull/513)). +- `ParseConstraintError` is now raised on version and constraint parsing errors, and includes information on the package that caused the error ([#514](https://github.com/python-poetry/poetry-core/pull/514)). + +### Fixed + +- Fix an issue where invalid PEP 508 requirements were generated due to a missing space before semicolons ([#510](https://github.com/python-poetry/poetry-core/pull/510)). +- Fix an issue where relative paths were encoded into package requirements, instead of a file:// URL as required by PEP 508 ([#512](https://github.com/python-poetry/poetry-core/pull/512)). + +### Vendoring + +- [`jsonschema==4.17.0`](https://github.com/python-jsonschema/jsonschema/blob/main/CHANGELOG.rst) +- [`lark==1.1.4`](https://github.com/lark-parser/lark/releases/tag/1.1.4) +- [`pyrsistent==0.19.2`](https://github.com/tobgu/pyrsistent/blob/master/CHANGES.txt) +- [`tomlkit==0.11.6`](https://github.com/sdispater/tomlkit/blob/master/CHANGELOG.md) +- [`typing-extensions==4.4.0`](https://github.com/python/typing_extensions/blob/main/CHANGELOG.md) + +## [1.3.2] - 2022-10-07 + +### Fixed + +- Fix an issue where the normalization was not applied to the path of an sdist built using a PEP 517 frontend ([#495](https://github.com/python-poetry/poetry-core/pull/495)). + +## [1.3.1] - 2022-10-05 + +### Fixed + +- Fix an issue where a typing-driven assertion could be false at runtime, causing a failure during prepare_metadata_for_build_wheel ([#492](https://github.com/python-poetry/poetry-core/pull/492)). + +## [1.3.0] - 2022-10-05 + +### Added + +- Add `3.11` to the list of available Python versions ([#477](https://github.com/python-poetry/poetry-core/pull/477)). + +### Changed + +- Deprecate `poetry.core.packages.constraints`, which is replaced by `poetry.core.constraints.generic` ([#482](https://github.com/python-poetry/poetry-core/pull/482)). +- Deprecate `poetry.core.semver`, which is replaced by `poetry.core.constraints.version` ([#482](https://github.com/python-poetry/poetry-core/pull/482)). + +### Fixed + +- Fix an issue where versions were escaped wrongly when building the wheel name ([#469](https://github.com/python-poetry/poetry-core/pull/469)). +- Fix an issue where caret constraints of pre-releases with a major version of 0 resulted in an empty version range ([#475](https://github.com/python-poetry/poetry-core/pull/475)). +- Fix an issue where the names of extras were not normalized according to PEP 685 ([#476](https://github.com/python-poetry/poetry-core/pull/476)). +- Fix an issue where sdist names were not normalized ([#484](https://github.com/python-poetry/poetry-core/pull/484)). + + +## [1.2.0] - 2022-09-13 + +### Added + +- Added support for subdirectories in `url` dependencies ([#398](https://github.com/python-poetry/poetry-core/pull/398)) (illustrated below). + +### Changed + +- When setting an invalid version constraint, an error is raised instead of silently setting "any version" ([#461](https://github.com/python-poetry/poetry-core/pull/461)). +- Allow more characters in author name ([#411](https://github.com/python-poetry/poetry-core/pull/411)).
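For illustration, the `url` dependency subdirectory support added in 1.2.0 (see the "Added" entry above) could be used roughly as follows in a project's `pyproject.toml`. This is a minimal sketch: the package name and archive URL are placeholders, and the `subdirectory` key is assumed to behave like the equivalent key for `git` dependencies.

```toml
[tool.poetry.dependencies]
python = "^3.7"
# hypothetical archive whose actual package lives in a sub-folder of the extracted tree
my-package = { url = "https://example.com/my-package-1.0.0.zip", subdirectory = "my-package" }
```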
+ +### Fixed + +- Fixed an issue where incorrect `Requires-Dist` information was generated when environment markers were used for optional packages ([#462](https://github.com/python-poetry/poetry-core/pull/462)). +- Fixed an issue where incorrect Python constraints were parsed from environment markers ([#457](https://github.com/python-poetry/poetry-core/pull/457)). +- Fixed the hashing of markers and constraints ([#466](https://github.com/python-poetry/poetry-core/pull/466)). +- Fixed an issue where the PEP 508 name of directory dependencies used platform paths ([#463](https://github.com/python-poetry/poetry-core/pull/463)). + + +## [1.1.0] - 2022-08-31 + +- No functional changes. + + +## [1.1.0rc3] - 2022-08-26 + +### Fixed + +- Fixed an issue where a malformed URL was passed to pip when installing from a git subdirectory ([#451](https://github.com/python-poetry/poetry-core/pull/451)). + + +## [1.1.0rc2] - 2022-08-26 + +### Changed + +- Enabled setting `version` of `ProjectPackage` to support dynamically setting the project's package version (e.g. from a plugin) ([#447](https://github.com/python-poetry/poetry-core/pull/447)). + +### Fixed + +- Fixed an issue where the `authors` property was not detected ([#437](https://github.com/python-poetry/poetry-core/pull/437)). +- Fixed an issue where submodules of git dependencies were not checked out ([#439](https://github.com/python-poetry/poetry-core/pull/439)). +- Fixed an issue with Python constraints from markers ([#448](https://github.com/python-poetry/poetry-core/pull/448)). +- Fixed an issue where the latest version of a git dependency was selected instead of the locked one ([#449](https://github.com/python-poetry/poetry-core/pull/449)). + + +## [1.1.0rc1] - 2022-08-17 + +### Changed + +- Replaced Poetry's helper method `canonicalize_name()` with `packaging.utils.canonicalize_name()` ([#418](https://github.com/python-poetry/poetry-core/pull/418)). +- Removed unused code ([#419](https://github.com/python-poetry/poetry-core/pull/419)). + +### Fixed + +- Fixed an issue with markers that resulted in incorrectly resolved extra dependencies ([#415](https://github.com/python-poetry/poetry-core/pull/415)). +- Fixed an issue where equal markers did not have the same hash ([#417](https://github.com/python-poetry/poetry-core/pull/417)). +- Fixed `allows_any()` for local versions ([#433](https://github.com/python-poetry/poetry-core/pull/433)). +- Fixed special cases of `next_major()`, `next_minor()`, etc. and deprecated ambiguous usage ([#434](https://github.com/python-poetry/poetry-core/pull/434)). +- Fixed an issue with Python constraints from markers ([#436](https://github.com/python-poetry/poetry-core/pull/436)). + + +## [1.1.0b3] - 2022-07-09 + +### Added + +- Added support for valid PEP 517 projects with a build system other than poetry-core as directory dependencies ([#368](https://github.com/python-poetry/poetry-core/pull/368), [#377](https://github.com/python-poetry/poetry-core/pull/377)). +- Added support for yanked files and releases according to PEP 592 ([#400](https://github.com/python-poetry/poetry-core/pull/400)). + +### Changed + +- Relaxed schema validation to allow additional properties ([#369](https://github.com/python-poetry/poetry-core/pull/369)). +- Harmonized string representation of dependencies ([#393](https://github.com/python-poetry/poetry-core/pull/393)). +- Changed wheel name normalization to follow the most recent packaging specification ([#394](https://github.com/python-poetry/poetry-core/pull/394)).
+- Changed equality check of direct origin dependencies, so that constraints are not considered anymore ([#405](https://github.com/python-poetry/poetry-core/pull/405)). +- Deprecated `Dependency.set_constraint()` and replaced it with a `constraint` property for consistency ([#370](https://github.com/python-poetry/poetry-core/pull/370)). +- Removed `Package.requires_extras` ([#374](https://github.com/python-poetry/poetry-core/pull/374)). +- Improved marker handling ([#380](https://github.com/python-poetry/poetry-core/pull/380), +[#383](https://github.com/python-poetry/poetry-core/pull/383), +[#384](https://github.com/python-poetry/poetry-core/pull/384), +[#390](https://github.com/python-poetry/poetry-core/pull/390), +[#395](https://github.com/python-poetry/poetry-core/pull/395)). + +### Fixed + +- Fixed the hash method for `PackageSpecification`, `Package`, `Dependency` and their subclasses ([#370](https://github.com/python-poetry/poetry-core/pull/370)). +- Fixed merging of the markers `python_version` and `python_full_version` ([#382](https://github.com/python-poetry/poetry-core/pull/382), [#388](https://github.com/python-poetry/poetry-core/pull/388)). +- Fixed Python version normalization ([#385](https://github.com/python-poetry/poetry-core/pull/385), [#407](https://github.com/python-poetry/poetry-core/pull/407)). +- Fixed an issue where version identifiers with a local version segment allowed non-local versions ([#396](https://github.com/python-poetry/poetry-core/pull/396)). +- Fixed an issue where version identifiers without a post release segment allowed post releases ([#396](https://github.com/python-poetry/poetry-core/pull/396)). +- Fixed script definitions that didn't work when extras were not explicitly defined ([#404](https://github.com/python-poetry/poetry-core/pull/404)). + + +## [1.1.0b2] - 2022-05-24 + +### Fixed + +- Fixed a regression where `poetry-core` no longer handled improper Python version constraints from package metadata ([#371](https://github.com/python-poetry/poetry-core/pull/371)) +- Fixed missing version bump in `poetry.core.__version__` ([#367](https://github.com/python-poetry/poetry-core/pull/367)) + +### Improvements + +- Wheels generated by `poetry-core` now correctly identify their `Generator` metadata as `poetry-core` instead of `poetry` ([#367](https://github.com/python-poetry/poetry-core/pull/367)) + + +## [1.1.0b1] - 2022-05-23 + +### Fixed + +- Fixed an issue where canonicalizing package names could lead to infinite loops ([#328](https://github.com/python-poetry/poetry-core/pull/328)). +- Fixed an issue where versions weren't correctly normalized to PEP-440 ([#344](https://github.com/python-poetry/poetry-core/pull/344)). +- Fixed an issue with the union of multi markers if one marker is a subset of the other marker ([#352](https://github.com/python-poetry/poetry-core/pull/352)). +- Fixed an issue with markers which are not in disjunctive normal form (DNF) ([#347](https://github.com/python-poetry/poetry-core/pull/347)). +- Fixed an issue where stub-only partial namespace packages were not recognized as packages ([#221](https://github.com/python-poetry/poetry-core/pull/221)). +- Fixed an issue where PEP-508 url requirements with extras were not parsed correctly ([#345](https://github.com/python-poetry/poetry-core/pull/345)). +- Fixed an issue where PEP-508 strings with wildcard exclusion constraints were incorrectly exported ([#343](https://github.com/python-poetry/poetry-core/pull/343)).
+- Allow hidden directories on Windows bare repos ([#341](https://github.com/python-poetry/poetry-core/pull/341)). +- Fixed an issue where dependencies with an epoch were parsed as empty ([#316](https://github.com/python-poetry/poetry-core/pull/316)). +- Fixed an issue where a package consisting of multiple packages wasn't built correctly ([#292](https://github.com/python-poetry/poetry-core/pull/292)). + +### Added + +- Added support for handling git URLs with a subdirectory ([#288](https://github.com/python-poetry/poetry-core/pull/288)). +- Added support for metadata files as described in PEP-658 for PEP-503 "simple" API repositories ([#333](https://github.com/python-poetry/poetry-core/pull/333)). + +### Changed + +- Renamed the dependency group of runtime dependencies from `default` to `main` ([#326](https://github.com/python-poetry/poetry-core/pull/326)). + +### Improvements + +- `poetry-core` is now completely type checked. +- Improved the SemVer constraint parsing ([#327](https://github.com/python-poetry/poetry-core/pull/327)). +- Improved the speed when cloning git repositories ([#290](https://github.com/python-poetry/poetry-core/pull/290)). + + +## [1.1.0a7] - 2022-03-05 + +### Fixed + +- Fixed an issue when evaluating `in/not in` markers ([#188](https://github.com/python-poetry/poetry-core/pull/188)). +- Fixed an issue when parsing caret constraints with a leading zero ([#201](https://github.com/python-poetry/poetry-core/pull/201)). +- Respect format for explicitly included files when finding excluded files ([#228](https://github.com/python-poetry/poetry-core/pull/228)). +- Fixed an issue where only the last location was used when multiple packages should be included ([#108](https://github.com/python-poetry/poetry-core/pull/108)). +- Ensure that the package `description` contains no newline ([#219](https://github.com/python-poetry/poetry-core/pull/219)). +- Fixed an issue where all default dependencies were removed instead of just the selected one ([#220](https://github.com/python-poetry/poetry-core/pull/220)). +- Ensure that authors and maintainers are normalized ([#276](https://github.com/python-poetry/poetry-core/pull/276)). + +### Added + +- Add support for most of the guaranteed hashes ([#207](https://github.com/python-poetry/poetry-core/pull/207)). +- Add support to declare multiple README files ([#248](https://github.com/python-poetry/poetry-core/pull/248)) (illustrated below). +- Add support for git subdirectories ([#192](https://github.com/python-poetry/poetry-core/pull/192)). +- Add hooks according to PEP-660 for editable installs ([#182](https://github.com/python-poetry/poetry-core/pull/182)). +- Add support for version epochs ([#264](https://github.com/python-poetry/poetry-core/pull/264)). + +### Changed + +- Drop Python 3.6 support ([#263](https://github.com/python-poetry/poetry-core/pull/263)). +- Loosen the strictness when parsing version constraints to support invalid use of wildcards, e.g. `>=3.*` ([#186](https://github.com/python-poetry/poetry-core/pull/186)). +- No longer assume a default git branch name ([#192](https://github.com/python-poetry/poetry-core/pull/192)). +- Sort package names in extras to make them reproducible ([#280](https://github.com/python-poetry/poetry-core/pull/280)).
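As a rough sketch of the multiple-README and git subdirectory support added in 1.1.0a7 above, a `pyproject.toml` might combine them as shown below. All names, e-mail addresses and URLs are placeholders, not taken from this repository.

```toml
[tool.poetry]
name = "example-project"
version = "0.1.0"
description = "Example only"
authors = ["Jane Doe <jane@example.com>"]
# several README files can be declared; their contents are combined into the long description
readme = ["README.md", "CHANGELOG.md"]

[tool.poetry.dependencies]
python = "^3.7"
# git dependency that lives in a subdirectory of the repository
helper-lib = { git = "https://github.com/example/monorepo.git", subdirectory = "packages/helper-lib" }

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
```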
+ +### Improvements + +- Improve marker handling ([#208](https://github.com/python-poetry/poetry-core/pull/208), +[#282](https://github.com/python-poetry/poetry-core/pull/282), +[#283](https://github.com/python-poetry/poetry-core/pull/283), +[#284](https://github.com/python-poetry/poetry-core/pull/284), +[#286](https://github.com/python-poetry/poetry-core/pull/286), +[#291](https://github.com/python-poetry/poetry-core/pull/291), +[#293](https://github.com/python-poetry/poetry-core/pull/293), +[#294](https://github.com/python-poetry/poetry-core/pull/294), +[#297](https://github.com/python-poetry/poetry-core/pull/297)). + + +## [1.1.0a6] - 2021-07-30 + +### Added + +- Added support for dependency groups. ([#183](https://github.com/python-poetry/poetry-core/pull/183)) + + +## [1.1.0a5] - 2021-05-21 + +### Added + +- Added support for script files in addition to standard entry points. ([#40](https://github.com/python-poetry/poetry-core/pull/40)) + +### Fixed + +- Fixed an error in the way python markers with a precision >= 3 were handled. ([#178](https://github.com/python-poetry/poetry-core/pull/178)) + + +## [1.1.0a4] - 2021-04-30 + +### Changed + +- Files in source distributions now have a deterministic time to improve reproducibility. ([#142](https://github.com/python-poetry/poetry-core/pull/142)) + +### Fixed + +- Fixed an error where leading zeros in the local build part of version specifications were discarded. ([#167](https://github.com/python-poetry/poetry-core/pull/167)) +- Fixed the PEP 508 representation of file dependencies. ([#153](https://github.com/python-poetry/poetry-core/pull/153)) +- Fixed the copy of `Package` instances which led to file hashes not being available. ([#159](https://github.com/python-poetry/poetry-core/pull/159)) +- Fixed an error in the parsing of caret requirements with a pre-release lower bound. ([#171](https://github.com/python-poetry/poetry-core/pull/171)) +- Fixed an error where some pre-release versions were not flagged as pre-releases. ([#170](https://github.com/python-poetry/poetry-core/pull/170)) + + +## [1.1.0a3] - 2021-04-09 + +### Fixed + +- Fixed dependency markers not being properly copied when changing the constraint ([#162](https://github.com/python-poetry/poetry-core/pull/162)). + + +## [1.1.0a2] - 2021-04-08 + +### Fixed + +- Fixed performance regressions when parsing version constraints ([#152](https://github.com/python-poetry/poetry-core/pull/152)). +- Fixed how local build versions are handled and compared ([#157](https://github.com/python-poetry/poetry-core/pull/157), [#158](https://github.com/python-poetry/poetry-core/pull/158)). +- Fixed errors when parsing some environment markers ([#155](https://github.com/python-poetry/poetry-core/pull/155)). + + +## [1.1.0a1] - 2021-03-30 + +This version is the first to drop support for Python 2.7 and 3.5. + +If you are still using these versions you should update the `requires` property of the `build-system` section +to restrict the version of `poetry-core`: + +```toml +[build-system] +requires = ["poetry-core<1.1.0"] +build-backend = "poetry.core.masonry.api" +``` + +### Changed + +- Dropped support for Python 2.7 and 3.5 ([#131](https://github.com/python-poetry/poetry-core/pull/131)). +- Reorganized imports internally to improve performances ([#131](https://github.com/python-poetry/poetry-core/pull/131)). +- Directory dependencies are now in non-develop mode by default ([#98](https://github.com/python-poetry/poetry-core/pull/98)). 
+- Improved support for PEP 440 specific versions that do not abide by semantic versioning ([#140](https://github.com/python-poetry/poetry-core/pull/140)). + +### Fixed + +- Fixed path dependencies PEP 508 representation ([#141](https://github.com/python-poetry/poetry-core/pull/141)). + + +## [1.0.2] - 2021-02-05 + +### Fixed + +- Fixed a missing import causing an error in Poetry ([#134](https://github.com/python-poetry/poetry-core/pull/134)). + + +## [1.0.1] - 2021-02-05 + +### Fixed + +- Fixed PEP 508 representation of dependency without extras ([#102](https://github.com/python-poetry/poetry-core/pull/102)). +- Fixed an error where development dependencies were being resolved when invoking the PEP-517 backend ([#101](https://github.com/python-poetry/poetry-core/pull/101)). +- Fixed source distribution not being deterministic ([#105](https://github.com/python-poetry/poetry-core/pull/105)). +- Fixed an error where zip files were left open when building wheels ([#122](https://github.com/python-poetry/poetry-core/pull/122)). +- Fixed an error where explicitly included files were still not present in final distributions ([#124](https://github.com/python-poetry/poetry-core/pull/124)). +- Fixed wheel filename matching for recent architecture ([#125](https://github.com/python-poetry/poetry-core/pull/125), [#129](https://github.com/python-poetry/poetry-core/pull/129)). +- Fixed an error where the `&` character was not accepted for author names ([#120](https://github.com/python-poetry/poetry-core/pull/120)). +- Fixed the PEP-508 representation of some dependencies ([#103](https://github.com/python-poetry/poetry-core/pull/103)). +- Fixed the `Requires-Python` metadata generation ([#127](https://github.com/python-poetry/poetry-core/pull/127)). +- Fixed an error where pre-release versions were accepted in version constraints ([#128](https://github.com/python-poetry/poetry-core/pull/128)). + + +## [1.0.0] - 2020-09-30 + +No changes. + + +## [1.0.0rc3] - 2020-09-30 + +### Changed + +- Removed `intreehooks` build backend in favor of the `backend-path` mechanism ([#90](https://github.com/python-poetry/poetry-core/pull/90)). +- Directory dependencies will now always use a posix path for their representation ([#90](https://github.com/python-poetry/poetry-core/pull/91)). +- Dependency constraints can now be set directly via a proper setter ([#90](https://github.com/python-poetry/poetry-core/pull/90)). + + +## [1.0.0rc2] - 2020-09-25 + +### Fixed + +- Fixed `python_full_version` markers conversion to version constraints ([#86](https://github.com/python-poetry/core/pull/86)). + + +## [1.0.0rc1] - 2020-09-25 + +### Fixed + +- Fixed Python constraint propagation when converting a package to a dependency ([#84](https://github.com/python-poetry/core/pull/84)). +- Fixed VCS ignored files being included in wheel distributions for projects using the `src` layout ([#81](https://github.com/python-poetry/core/pull/81)) + + +## [1.0.0b1] - 2020-09-18 + +### Added + +- Added support for build executable for wheels ([#72](https://github.com/python-poetry/core/pull/72)). + +### Changed + +- Improved packages with sources equality comparison ([#53](https://github.com/python-poetry/core/pull/53)). +- Improved licenses handling and packaging in builders ([#57](https://github.com/python-poetry/core/pull/57)). +- Refactored packages and dependencies classes to improve comparison between bare packages and packages with extras ([#78](https://github.com/python-poetry/core/pull/78)). 
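To make the "packages with extras" and "packages with sources" wording in the entries above concrete, the corresponding `pyproject.toml` declarations look roughly like this. This is an illustrative sketch only; the dependency constraint and source name/URL are placeholders.

```toml
[tool.poetry.dependencies]
python = "^3.7"
# dependency requested with an extra enabled (a "package with extras")
requests = { version = "^2.27", extras = ["socks"] }

# an alternate package source that packages can be attached to
[[tool.poetry.source]]
name = "internal"
url = "https://pypi.example.com/simple/"
```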
+ +### Fixed + +- Fixed PEP-508 representation of URL dependencies ([#60](https://github.com/python-poetry/core/pull/60)). +- Fixed generated `RECORD` files in some cases by ensuring it's a valid CSV file ([#61](https://github.com/python-poetry/core/pull/61)). +- Fixed an error when parsing some version constraints if they contained wildcard elements ([#56](https://github.com/python-poetry/core/pull/56)). +- Fixed errors when using the `exclude` property ([#62](https://github.com/python-poetry/core/pull/62)). +- Fixed the way git revisions are retrieved ([#69](https://github.com/python-poetry/core/pull/69)). +- Fixed dependency constraint PEP-508 compatibility when generating metadata ([#79](https://github.com/python-poetry/core/pull/79)). +- Fixed potential errors on Python 3.5 when building with the `include` property set ([#75](https://github.com/python-poetry/core/pull/75)). + + +## [1.0.0a9] - 2020-07-24 + +### Added + +- Added support for build scripts without `setup.py` generation ([#45](https://github.com/python-poetry/core/pull/45)). + +### Changed + +- Improved the parsing of requirements and environment markers ([#44](https://github.com/python-poetry/core/pull/44)). + +### Fixed + +- Fixed the default value used for the `build.generate-setup-file` settings ([#43](https://github.com/python-poetry/core/pull/43)). +- Fixed error messages when the authors specified in the pyproject.toml file are invalid ([#49](https://github.com/python-poetry/core/pull/49)). +- Fixed distributions build when using the PEP-517 backend for packages with includes ([#47](https://github.com/python-poetry/core/pull/47)). + + +## [1.0.0a8] - 2020-06-26 + +### Fixed + +- Fixed errors in the way Python environment markers were parsed and generated ([#36](https://github.com/python-poetry/core/pull/36)). + + +## [1.0.0a7] - 2020-05-06 + +### Added + +- Added support for format-specific includes via the `include` property ([#6](https://github.com/python-poetry/core/pull/6)). + +### Changed + +- Allow url dependencies in multiple constraints dependencies ([#32](https://github.com/python-poetry/core/pull/32)). + +### Fixed + +- Fixed PEP 508 representation and parsing of VCS dependencies ([#30](https://github.com/python-poetry/core/pull/30)). + + +## [1.0.0a6] - 2020-04-24 + +### Added + +- Added support for markers inverse ([#21](https://github.com/python-poetry/core/pull/21)). +- Added support for specifying that `git` dependencies should be installed in develop mode ([#23](https://github.com/python-poetry/core/pull/23)). +- Added the ability to specify build settings from the Poetry main configuration file ([#26](https://github.com/python-poetry/core/pull/26)). +- Added the ability to disable the generation of the `setup.py` file when building ([#26](https://github.com/python-poetry/core/pull/26)). + +### Changed + +- Relaxed licence restrictions to support custom licences ([#5](https://github.com/python-poetry/core/pull/5)). +- Improved support for PEP-440 direct references ([#22](https://github.com/python-poetry/core/pull/22)). +- Improved dependency vendoring ([#25](https://github.com/python-poetry/core/pull/25)). + +### Fixed + +- Fixed the inability to make the url dependencies optional ([#13](https://github.com/python-poetry/core/pull/13)). +- Fixed whitespaces in PEP-440 constraints causing an error ([#16](https://github.com/python-poetry/core/pull/16)). +- Fixed subpackage check when generating the `setup.py` file ([#17](https://github.com/python-poetry/core/pull/17)). 
+- Fix PEP-517 issues for projects using build scripts ([#12](https://github.com/python-poetry/core/pull/12)). +- Fixed support for stub-only packages ([#28](https://github.com/python-poetry/core/pull/28)). + + +[Unreleased]: https://github.com/python-poetry/poetry-core/compare/1.4.0...main +[1.4.0]: https://github.com/python-poetry/poetry-core/releases/tag/1.4.0 +[1.3.2]: https://github.com/python-poetry/poetry-core/releases/tag/1.3.2 +[1.3.1]: https://github.com/python-poetry/poetry-core/releases/tag/1.3.1 +[1.3.0]: https://github.com/python-poetry/poetry-core/releases/tag/1.3.0 +[1.2.0]: https://github.com/python-poetry/poetry-core/releases/tag/1.2.0 +[1.1.0]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0 +[1.1.0rc3]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0rc3 +[1.1.0rc2]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0rc2 +[1.1.0rc1]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0rc1 +[1.1.0b3]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0b3 +[1.1.0b2]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0b2 +[1.1.0b1]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0b1 +[1.1.0a7]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0a7 +[1.1.0a6]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0a6 +[1.1.0a5]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0a5 +[1.1.0a4]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0a4 +[1.1.0a3]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0a3 +[1.1.0a2]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0a2 +[1.1.0a1]: https://github.com/python-poetry/poetry-core/releases/tag/1.1.0a1 +[1.0.2]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.2 +[1.0.1]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.1 +[1.0.0]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0 +[1.0.0rc3]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0rc3 +[1.0.0rc2]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0rc2 +[1.0.0rc1]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0rc1 +[1.0.0b1]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0b1 +[1.0.0a9]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0a9 +[1.0.0a8]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0a8 +[1.0.0a7]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0a7 +[1.0.0a6]: https://github.com/python-poetry/poetry-core/releases/tag/1.0.0a6 diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..230c44f --- /dev/null +++ b/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2020 Sébastien Eustace + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/Makefile b/Makefile new file mode 100644 index 0000000..dd7e1f9 --- /dev/null +++ b/Makefile @@ -0,0 +1,30 @@ +SHELL := $(shell which bash) -e +MAKEFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST))) +ROOT_DIR := $(patsubst %/,%,$(dir $(MAKEFILE_PATH))) + +VENDOR_SRC := $(ROOT_DIR)/vendors +VENDOR_DIR := $(ROOT_DIR)/src/poetry/core/_vendor +VENDOR_TXT := $(VENDOR_DIR)/vendor.txt +POETRY_BIN ?= $(shell which poetry) + +.PHONY: vendor/lock +vendor/lock: $(VENDOR_LOCK) + # regenerate lock file + @pushd $(VENDOR_SRC) && $(POETRY_BIN) lock --no-update + +.PHONY: vendor/sync +vendor/sync: + # regenerate vendor.txt file (exported from lockfile) + @pushd $(VENDOR_SRC) && $(POETRY_BIN) export --without-hashes 2> /dev/null \ + | grep -E -v "(importlib|zipp)" \ + | sort > $(VENDOR_TXT) + + # vendor packages + @$(POETRY_BIN) run vendoring sync + + # strip out *.pyi stubs + @find "$(VENDOR_DIR)" -type f -name "*.pyi" -exec rm {} \; + +.PHONY: vendor/update +vendor/update: | vendor/lock vendor/sync + @: diff --git a/README.md b/README.md new file mode 100644 index 0000000..4b55592 --- /dev/null +++ b/README.md @@ -0,0 +1,45 @@ +# Poetry Core +[![PyPI version](https://img.shields.io/pypi/v/poetry-core)](https://pypi.org/project/poetry-core/) +[![Python Versions](https://img.shields.io/pypi/pyversions/poetry-core)](https://pypi.org/project/poetry-core/) +[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT) +[![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) +[![](https://github.com/python-poetry/poetry-core/workflows/Tests/badge.svg)](https://github.com/python-poetry/poetry-core/actions?query=workflow%3ATests) + +A [PEP 517](https://www.python.org/dev/peps/pep-0517/) build backend implementation developed for +[Poetry](https://github.com/python-poetry/poetry). This project is intended to be a lightweight, fully compliant, +self-contained package allowing PEP 517 compatible build frontends to build Poetry managed projects. + +## Usage +In most cases, the usage of this package is transparent to the end-user as it is either made use of by Poetry itself +or by a PEP 517 frontend (e.g. `pip`). + +In order to enable the use of `poetry-core` as your build backend, the following snippet must be present in your +project's `pyproject.toml` file. + +```toml +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" +``` + +Once this is present, a PEP 517 frontend like `pip` can build and install your project from source without the need +for Poetry or any of its dependencies. + +```shell +# install to current environment +pip install /path/to/poetry/managed/project + +# build a wheel package +pip wheel /path/to/poetry/managed/project +``` + +## Why is this required? +Prior to the release of version `1.1.0`, Poetry was a project management tool that included a PEP 517 +build backend. This was inefficient and time-consuming when a PEP 517 build was required. For example, +both `pip` and `tox` (with isolated builds) would install Poetry and all dependencies it required. Most of these +dependencies are not required when the objective is to simply build either a source or binary distribution of your +project.
+ +In order to improve the above situation, `poetry-core` was created. Shared functionality pertaining to PEP 517 build +backends, including reading lock file, `pyproject.toml` and building wheel/sdist, were implemented in this package. This +makes PEP 517 builds extremely fast for Poetry managed packages. diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 0000000..fab06e5 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,917 @@ +[[package]] +name = "atomicwrites" +version = "1.4.0" +description = "Atomic file writes." +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" + +[[package]] +name = "attrs" +version = "21.4.0" +description = "Classes Without Boilerplate" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[package.extras] +dev = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "furo", "sphinx", "sphinx-notfound-page", "pre-commit", "cloudpickle"] +docs = ["furo", "sphinx", "zope.interface", "sphinx-notfound-page"] +tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "zope.interface", "cloudpickle"] +tests_no_zope = ["coverage[toml] (>=5.0.2)", "hypothesis", "pympler", "pytest (>=4.3.0)", "six", "mypy", "pytest-mypy-plugins", "cloudpickle"] + +[[package]] +name = "build" +version = "0.7.0" +description = "A simple, correct PEP517 package builder" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +colorama = {version = "*", markers = "os_name == \"nt\""} +importlib-metadata = {version = ">=0.22", markers = "python_version < \"3.8\""} +packaging = ">=19.0" +pep517 = ">=0.9.1" +tomli = ">=1.0.0" + +[package.extras] +docs = ["furo (>=2020.11.19b18)", "sphinx (>=3.0,<4.0)", "sphinx-argparse-cli (>=1.5)", "sphinx-autodoc-typehints (>=1.10)"] +test = ["filelock (>=3)", "pytest (>=6.2.4)", "pytest-cov (>=2)", "pytest-mock (>=2)", "pytest-rerunfailures (>=9.1)", "pytest-xdist (>=1.34)", "setuptools (>=42.0.0)", "toml (>=0.10.0)", "wheel (>=0.36.0)"] +typing = ["importlib-metadata (>=4.6.4)", "mypy (==0.910)", "typing-extensions (>=3.7.4.3)"] +virtualenv = ["virtualenv (>=20.0.35)"] + +[[package]] +name = "certifi" +version = "2021.10.8" +description = "Python package for providing Mozilla's CA Bundle." +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "cfgv" +version = "3.3.1" +description = "Validate configuration and produce human readable error messages." +category = "dev" +optional = false +python-versions = ">=3.6.1" + +[[package]] +name = "charset-normalizer" +version = "2.0.12" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." +category = "dev" +optional = false +python-versions = ">=3.5.0" + +[package.extras] +unicode_backport = ["unicodedata2"] + +[[package]] +name = "click" +version = "8.1.3" +description = "Composable command line interface toolkit" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} + +[[package]] +name = "colorama" +version = "0.4.4" +description = "Cross-platform colored terminal text." 
+category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "commonmark" +version = "0.9.1" +description = "Python parser for the CommonMark Markdown spec" +category = "dev" +optional = false +python-versions = "*" + +[package.extras] +test = ["flake8 (==3.7.8)", "hypothesis (==3.55.3)"] + +[[package]] +name = "coverage" +version = "6.4" +description = "Code coverage measurement for Python" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +tomli = {version = "*", optional = true, markers = "python_version < \"3.11\" and extra == \"toml\""} + +[package.extras] +toml = ["tomli"] + +[[package]] +name = "distlib" +version = "0.3.4" +description = "Distribution utilities" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "filelock" +version = "3.7.0" +description = "A platform independent file lock." +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.extras] +docs = ["furo (>=2021.8.17b43)", "sphinx (>=4.1)", "sphinx-autodoc-typehints (>=1.12)"] +testing = ["covdefaults (>=1.2.0)", "coverage (>=4)", "pytest (>=4)", "pytest-cov", "pytest-timeout (>=1.4.2)"] + +[[package]] +name = "identify" +version = "2.5.0" +description = "File identification library for Python" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.extras] +license = ["ukkonen"] + +[[package]] +name = "idna" +version = "3.3" +description = "Internationalized Domain Names in Applications (IDNA)" +category = "dev" +optional = false +python-versions = ">=3.5" + +[[package]] +name = "importlib-metadata" +version = "4.11.3" +description = "Read metadata from Python packages" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} +zipp = ">=0.5" + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)"] +perf = ["ipython"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "packaging", "pyfakefs", "flufl.flake8", "pytest-perf (>=0.9.2)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)", "importlib-resources (>=1.3)"] + +[[package]] +name = "importlib-resources" +version = "5.7.1" +description = "Read resources from Python packages" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)"] + +[[package]] +name = "iniconfig" +version = "1.1.1" +description = "iniconfig: brain-dead simple config-ini parsing" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "jsonschema" +version = "4.5.1" +description = "An implementation of JSON Schema validation for Python" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +attrs = ">=17.4.0" +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} +importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} +pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2" +typing-extensions = {version = "*", markers = "python_version < 
\"3.8\""} + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format_nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "mypy" +version = "0.960" +description = "Optional static typing for Python" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +mypy-extensions = ">=0.4.3" +tomli = {version = ">=1.1.0", markers = "python_version < \"3.11\""} +typed-ast = {version = ">=1.4.0,<2", markers = "python_version < \"3.8\""} +typing-extensions = ">=3.10" + +[package.extras] +dmypy = ["psutil (>=4.0)"] +python2 = ["typed-ast (>=1.4.0,<2)"] +reports = ["lxml"] + +[[package]] +name = "mypy-extensions" +version = "0.4.3" +description = "Experimental type system extensions for programs checked with the mypy typechecker." +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "nodeenv" +version = "1.6.0" +description = "Node.js virtual environment builder" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "packaging" +version = "21.3" +description = "Core utilities for Python packages" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" + +[[package]] +name = "pep517" +version = "0.12.0" +description = "Wrappers to build Python packages using PEP 517 hooks" +category = "dev" +optional = false +python-versions = "*" + +[package.dependencies] +importlib_metadata = {version = "*", markers = "python_version < \"3.8\""} +tomli = {version = ">=1.1.0", markers = "python_version >= \"3.6\""} +zipp = {version = "*", markers = "python_version < \"3.8\""} + +[[package]] +name = "platformdirs" +version = "2.5.2" +description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.extras] +docs = ["furo (>=2021.7.5b38)", "proselint (>=0.10.2)", "sphinx-autodoc-typehints (>=1.12)", "sphinx (>=4)"] +test = ["appdirs (==1.4.4)", "pytest-cov (>=2.7)", "pytest-mock (>=3.6)", "pytest (>=6)"] + +[[package]] +name = "pluggy" +version = "1.0.0" +description = "plugin and hook calling mechanisms for python" +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "pre-commit" +version = "2.19.0" +description = "A framework for managing and maintaining multi-language pre-commit hooks." +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +cfgv = ">=2.0.0" +identify = ">=1.0.0" +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} +nodeenv = ">=0.11.1" +pyyaml = ">=5.1" +toml = "*" +virtualenv = ">=20.0.8" + +[[package]] +name = "py" +version = "1.11.0" +description = "library with cross-python path, ini-parsing, io, code, log facilities" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" + +[[package]] +name = "pygments" +version = "2.12.0" +description = "Pygments is a syntax highlighting package written in Python." 
+category = "dev" +optional = false +python-versions = ">=3.6" + +[[package]] +name = "pyparsing" +version = "3.0.9" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +category = "dev" +optional = false +python-versions = ">=3.6.8" + +[package.extras] +diagrams = ["railroad-diagrams", "jinja2"] + +[[package]] +name = "pyrsistent" +version = "0.18.1" +description = "Persistent/Functional/Immutable data structures" +category = "dev" +optional = false +python-versions = ">=3.7" + +[[package]] +name = "pytest" +version = "7.1.2" +description = "pytest: simple powerful testing with Python" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +atomicwrites = {version = ">=1.0", markers = "sys_platform == \"win32\""} +attrs = ">=19.2.0" +colorama = {version = "*", markers = "sys_platform == \"win32\""} +importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +py = ">=1.8.2" +tomli = ">=1.0.0" + +[package.extras] +testing = ["argcomplete", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "xmlschema"] + +[[package]] +name = "pytest-cov" +version = "3.0.0" +description = "Pytest plugin for measuring coverage." +category = "dev" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +coverage = {version = ">=5.2.1", extras = ["toml"]} +pytest = ">=4.6" + +[package.extras] +testing = ["fields", "hunter", "process-tests", "six", "pytest-xdist", "virtualenv"] + +[[package]] +name = "pytest-mock" +version = "3.7.0" +description = "Thin-wrapper around the mock package for easier use with pytest" +category = "dev" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +pytest = ">=5.0" + +[package.extras] +dev = ["pre-commit", "tox", "pytest-asyncio"] + +[[package]] +name = "pyyaml" +version = "6.0" +description = "YAML parser and emitter for Python" +category = "dev" +optional = false +python-versions = ">=3.6" + +[[package]] +name = "requests" +version = "2.27.1" +description = "Python HTTP for Humans." 
+category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*" + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = {version = ">=2.0.0,<2.1.0", markers = "python_version >= \"3\""} +idna = {version = ">=2.5,<4", markers = "python_version >= \"3\""} +urllib3 = ">=1.21.1,<1.27" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)", "win-inet-pton"] +use_chardet_on_py3 = ["chardet (>=3.0.2,<5)"] + +[[package]] +name = "rich" +version = "12.4.1" +description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" +category = "dev" +optional = false +python-versions = ">=3.6.3,<4.0.0" + +[package.dependencies] +commonmark = ">=0.9.0,<0.10.0" +pygments = ">=2.6.0,<3.0.0" +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} + +[package.extras] +jupyter = ["ipywidgets (>=7.5.1,<8.0.0)"] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" + +[[package]] +name = "toml" +version = "0.10.2" +description = "Python Library for Tom's Obvious, Minimal Language" +category = "dev" +optional = false +python-versions = ">=2.6, !=3.0.*, !=3.1.*, !=3.2.*" + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +category = "dev" +optional = false +python-versions = ">=3.7" + +[[package]] +name = "tox" +version = "3.25.0" +description = "tox is a generic virtualenv management and test command line tool" +category = "dev" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" + +[package.dependencies] +colorama = {version = ">=0.4.1", markers = "platform_system == \"Windows\""} +filelock = ">=3.0.0" +importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} +packaging = ">=14" +pluggy = ">=0.12.0" +py = ">=1.4.17" +six = ">=1.14.0" +toml = ">=0.9.4" +virtualenv = ">=16.0.0,<20.0.0 || >20.0.0,<20.0.1 || >20.0.1,<20.0.2 || >20.0.2,<20.0.3 || >20.0.3,<20.0.4 || >20.0.4,<20.0.5 || >20.0.5,<20.0.6 || >20.0.6,<20.0.7 || >20.0.7" + +[package.extras] +docs = ["pygments-github-lexers (>=0.0.5)", "sphinx (>=2.0.0)", "sphinxcontrib-autoprogram (>=0.1.5)", "towncrier (>=18.5.0)"] +testing = ["flaky (>=3.4.0)", "freezegun (>=0.3.11)", "pytest (>=4.0.0)", "pytest-cov (>=2.5.1)", "pytest-mock (>=1.10.0)", "pytest-randomly (>=1.0.0)", "psutil (>=5.6.1)", "pathlib2 (>=2.3.3)"] + +[[package]] +name = "typed-ast" +version = "1.5.4" +description = "a fork of Python 2 and 3 ast modules with type comment support" +category = "dev" +optional = false +python-versions = ">=3.6" + +[[package]] +name = "types-jsonschema" +version = "4.4.4" +description = "Typing stubs for jsonschema" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "types-setuptools" +version = "57.4.14" +description = "Typing stubs for setuptools" +category = "dev" +optional = false +python-versions = "*" + +[[package]] +name = "typing-extensions" +version = "4.2.0" +description = "Backported and Experimental Type Hints for Python 3.7+" +category = "main" +optional = false +python-versions = ">=3.7" + +[[package]] +name = "urllib3" +version = "1.26.9" +description = "HTTP library with thread-safe connection pooling, file post, and more." 
+category = "dev" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4" + +[package.extras] +brotli = ["brotlicffi (>=0.8.0)", "brotli (>=1.0.9)", "brotlipy (>=0.6.0)"] +secure = ["pyOpenSSL (>=0.14)", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "certifi", "ipaddress"] +socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] + +[[package]] +name = "vendoring" +version = "1.2.0" +description = "A command line tool, to simplify vendoring pure Python dependencies." +category = "dev" +optional = false +python-versions = "~= 3.8" + +[package.dependencies] +click = "*" +jsonschema = "*" +packaging = "*" +requests = "*" +rich = "*" +toml = "*" + +[package.extras] +doc = ["sphinx"] +test = ["pytest", "pytest-cov", "pytest-mock"] + +[[package]] +name = "virtualenv" +version = "20.14.1" +description = "Virtual Python Environment builder" +category = "dev" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" + +[package.dependencies] +distlib = ">=0.3.1,<1" +filelock = ">=3.2,<4" +importlib-metadata = {version = ">=0.12", markers = "python_version < \"3.8\""} +platformdirs = ">=2,<3" +six = ">=1.9.0,<2" + +[package.extras] +docs = ["proselint (>=0.10.2)", "sphinx (>=3)", "sphinx-argparse (>=0.2.5)", "sphinx-rtd-theme (>=0.4.3)", "towncrier (>=21.3)"] +testing = ["coverage (>=4)", "coverage-enable-subprocess (>=1)", "flaky (>=3)", "pytest (>=4)", "pytest-env (>=0.6.2)", "pytest-freezegun (>=0.4.1)", "pytest-mock (>=2)", "pytest-randomly (>=1)", "pytest-timeout (>=1)", "packaging (>=20.0)"] + +[[package]] +name = "zipp" +version = "3.8.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.extras] +docs = ["sphinx", "jaraco.packaging (>=9)", "rst.linker (>=1.9)"] +testing = ["pytest (>=6)", "pytest-checkdocs (>=2.4)", "pytest-flake8", "pytest-cov", "pytest-enabler (>=1.0.1)", "jaraco.itertools", "func-timeout", "pytest-black (>=0.3.7)", "pytest-mypy (>=0.9.1)"] + +[metadata] +lock-version = "1.1" +python-versions = "^3.7" +content-hash = "942983e12963ee3294081a5f38b6a66034dc7cd350b48a65f21e706a77f160d7" + +[metadata.files] +atomicwrites = [ + {file = "atomicwrites-1.4.0-py2.py3-none-any.whl", hash = "sha256:6d1784dea7c0c8d4a5172b6c620f40b6e4cbfdf96d783691f2e1302a7b88e197"}, + {file = "atomicwrites-1.4.0.tar.gz", hash = "sha256:ae70396ad1a434f9c7046fd2dd196fc04b12f9e91ffb859164193be8b6168a7a"}, +] +attrs = [ + {file = "attrs-21.4.0-py2.py3-none-any.whl", hash = "sha256:2d27e3784d7a565d36ab851fe94887c5eccd6a463168875832a1be79c82828b4"}, + {file = "attrs-21.4.0.tar.gz", hash = "sha256:626ba8234211db98e869df76230a137c4c40a12d72445c45d5f5b716f076e2fd"}, +] +build = [ + {file = "build-0.7.0-py3-none-any.whl", hash = "sha256:21b7ebbd1b22499c4dac536abc7606696ea4d909fd755e00f09f3c0f2c05e3c8"}, + {file = "build-0.7.0.tar.gz", hash = "sha256:1aaadcd69338252ade4f7ec1265e1a19184bf916d84c9b7df095f423948cb89f"}, +] +certifi = [ + {file = "certifi-2021.10.8-py2.py3-none-any.whl", hash = "sha256:d62a0163eb4c2344ac042ab2bdf75399a71a2d8c7d47eac2e2ee91b9d6339569"}, + {file = "certifi-2021.10.8.tar.gz", hash = "sha256:78884e7c1d4b00ce3cea67b44566851c4343c120abd683433ce934a68ea58872"}, +] +cfgv = [ + {file = "cfgv-3.3.1-py2.py3-none-any.whl", hash = "sha256:c6a0883f3917a037485059700b9e75da2464e6c27051014ad85ba6aaa5884426"}, + {file = "cfgv-3.3.1.tar.gz", hash = "sha256:f5a830efb9ce7a445376bb66ec94c638a9787422f96264c98edc6bdeed8ab736"}, +] +charset-normalizer = [ + 
{file = "charset-normalizer-2.0.12.tar.gz", hash = "sha256:2857e29ff0d34db842cd7ca3230549d1a697f96ee6d3fb071cfa6c7393832597"}, + {file = "charset_normalizer-2.0.12-py3-none-any.whl", hash = "sha256:6881edbebdb17b39b4eaaa821b438bf6eddffb4468cf344f09f89def34a8b1df"}, +] +click = [ + {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, + {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, +] +colorama = [ + {file = "colorama-0.4.4-py2.py3-none-any.whl", hash = "sha256:9f47eda37229f68eee03b24b9748937c7dc3868f906e8ba69fbcbdd3bc5dc3e2"}, + {file = "colorama-0.4.4.tar.gz", hash = "sha256:5941b2b48a20143d2267e95b1c2a7603ce057ee39fd88e7329b0c292aa16869b"}, +] +commonmark = [ + {file = "commonmark-0.9.1-py2.py3-none-any.whl", hash = "sha256:da2f38c92590f83de410ba1a3cbceafbc74fee9def35f9251ba9a971d6d66fd9"}, + {file = "commonmark-0.9.1.tar.gz", hash = "sha256:452f9dc859be7f06631ddcb328b6919c67984aca654e5fefb3914d54691aed60"}, +] +coverage = [ + {file = "coverage-6.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:50ed480b798febce113709846b11f5d5ed1e529c88d8ae92f707806c50297abf"}, + {file = "coverage-6.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:26f8f92699756cb7af2b30720de0c5bb8d028e923a95b6d0c891088025a1ac8f"}, + {file = "coverage-6.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:60c2147921da7f4d2d04f570e1838db32b95c5509d248f3fe6417e91437eaf41"}, + {file = "coverage-6.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:750e13834b597eeb8ae6e72aa58d1d831b96beec5ad1d04479ae3772373a8088"}, + {file = "coverage-6.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af5b9ee0fc146e907aa0f5fb858c3b3da9199d78b7bb2c9973d95550bd40f701"}, + {file = "coverage-6.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:a022394996419142b33a0cf7274cb444c01d2bb123727c4bb0b9acabcb515dea"}, + {file = "coverage-6.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5a78cf2c43b13aa6b56003707c5203f28585944c277c1f3f109c7b041b16bd39"}, + {file = "coverage-6.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9229d074e097f21dfe0643d9d0140ee7433814b3f0fc3706b4abffd1e3038632"}, + {file = "coverage-6.4-cp310-cp310-win32.whl", hash = "sha256:fb45fe08e1abc64eb836d187b20a59172053999823f7f6ef4f18a819c44ba16f"}, + {file = "coverage-6.4-cp310-cp310-win_amd64.whl", hash = "sha256:3cfd07c5889ddb96a401449109a8b97a165be9d67077df6802f59708bfb07720"}, + {file = "coverage-6.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:03014a74023abaf5a591eeeaf1ac66a73d54eba178ff4cb1fa0c0a44aae70383"}, + {file = "coverage-6.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c82f2cd69c71698152e943f4a5a6b83a3ab1db73b88f6e769fabc86074c3b08"}, + {file = "coverage-6.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7b546cf2b1974ddc2cb222a109b37c6ed1778b9be7e6b0c0bc0cf0438d9e45a6"}, + {file = "coverage-6.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc173f1ce9ffb16b299f51c9ce53f66a62f4d975abe5640e976904066f3c835d"}, + {file = "coverage-6.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c53ad261dfc8695062fc8811ac7c162bd6096a05a19f26097f411bdf5747aee7"}, + {file = 
"coverage-6.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:eef5292b60b6de753d6e7f2d128d5841c7915fb1e3321c3a1fe6acfe76c38052"}, + {file = "coverage-6.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:543e172ce4c0de533fa892034cce260467b213c0ea8e39da2f65f9a477425211"}, + {file = "coverage-6.4-cp37-cp37m-win32.whl", hash = "sha256:00c8544510f3c98476bbd58201ac2b150ffbcce46a8c3e4fb89ebf01998f806a"}, + {file = "coverage-6.4-cp37-cp37m-win_amd64.whl", hash = "sha256:b84ab65444dcc68d761e95d4d70f3cfd347ceca5a029f2ffec37d4f124f61311"}, + {file = "coverage-6.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:d548edacbf16a8276af13063a2b0669d58bbcfca7c55a255f84aac2870786a61"}, + {file = "coverage-6.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:033ebec282793bd9eb988d0271c211e58442c31077976c19c442e24d827d356f"}, + {file = "coverage-6.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:742fb8b43835078dd7496c3c25a1ec8d15351df49fb0037bffb4754291ef30ce"}, + {file = "coverage-6.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d55fae115ef9f67934e9f1103c9ba826b4c690e4c5bcf94482b8b2398311bf9c"}, + {file = "coverage-6.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5cd698341626f3c77784858427bad0cdd54a713115b423d22ac83a28303d1d95"}, + {file = "coverage-6.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:62d382f7d77eeeaff14b30516b17bcbe80f645f5cf02bb755baac376591c653c"}, + {file = "coverage-6.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:016d7f5cf1c8c84f533a3c1f8f36126fbe00b2ec0ccca47cc5731c3723d327c6"}, + {file = "coverage-6.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:69432946f154c6add0e9ede03cc43b96e2ef2733110a77444823c053b1ff5166"}, + {file = "coverage-6.4-cp38-cp38-win32.whl", hash = "sha256:83bd142cdec5e4a5c4ca1d4ff6fa807d28460f9db919f9f6a31babaaa8b88426"}, + {file = "coverage-6.4-cp38-cp38-win_amd64.whl", hash = "sha256:4002f9e8c1f286e986fe96ec58742b93484195defc01d5cc7809b8f7acb5ece3"}, + {file = "coverage-6.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:e4f52c272fdc82e7c65ff3f17a7179bc5f710ebc8ce8a5cadac81215e8326740"}, + {file = "coverage-6.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b5578efe4038be02d76c344007b13119b2b20acd009a88dde8adec2de4f630b5"}, + {file = "coverage-6.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d8099ea680201c2221f8468c372198ceba9338a5fec0e940111962b03b3f716a"}, + {file = "coverage-6.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a00441f5ea4504f5abbc047589d09e0dc33eb447dc45a1a527c8b74bfdd32c65"}, + {file = "coverage-6.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e76bd16f0e31bc2b07e0fb1379551fcd40daf8cdf7e24f31a29e442878a827c"}, + {file = "coverage-6.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:8d2e80dd3438e93b19e1223a9850fa65425e77f2607a364b6fd134fcd52dc9df"}, + {file = "coverage-6.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:341e9c2008c481c5c72d0e0dbf64980a4b2238631a7f9780b0fe2e95755fb018"}, + {file = "coverage-6.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:21e6686a95025927775ac501e74f5940cdf6fe052292f3a3f7349b0abae6d00f"}, + {file = "coverage-6.4-cp39-cp39-win32.whl", hash = "sha256:968ed5407f9460bd5a591cefd1388cc00a8f5099de9e76234655ae48cfdbe2c3"}, + {file = "coverage-6.4-cp39-cp39-win_amd64.whl", 
hash = "sha256:e35217031e4b534b09f9b9a5841b9344a30a6357627761d4218818b865d45055"}, + {file = "coverage-6.4-pp36.pp37.pp38-none-any.whl", hash = "sha256:e637ae0b7b481905358624ef2e81d7fb0b1af55f5ff99f9ba05442a444b11e45"}, + {file = "coverage-6.4.tar.gz", hash = "sha256:727dafd7f67a6e1cad808dc884bd9c5a2f6ef1f8f6d2f22b37b96cb0080d4f49"}, +] +distlib = [ + {file = "distlib-0.3.4-py2.py3-none-any.whl", hash = "sha256:6564fe0a8f51e734df6333d08b8b94d4ea8ee6b99b5ed50613f731fd4089f34b"}, + {file = "distlib-0.3.4.zip", hash = "sha256:e4b58818180336dc9c529bfb9a0b58728ffc09ad92027a3f30b7cd91e3458579"}, +] +filelock = [ + {file = "filelock-3.7.0-py3-none-any.whl", hash = "sha256:c7b5fdb219b398a5b28c8e4c1893ef5f98ece6a38c6ab2c22e26ec161556fed6"}, + {file = "filelock-3.7.0.tar.gz", hash = "sha256:b795f1b42a61bbf8ec7113c341dad679d772567b936fbd1bf43c9a238e673e20"}, +] +identify = [ + {file = "identify-2.5.0-py2.py3-none-any.whl", hash = "sha256:3acfe15a96e4272b4ec5662ee3e231ceba976ef63fd9980ed2ce9cc415df393f"}, + {file = "identify-2.5.0.tar.gz", hash = "sha256:c83af514ea50bf2be2c4a3f2fb349442b59dc87284558ae9ff54191bff3541d2"}, +] +idna = [ + {file = "idna-3.3-py3-none-any.whl", hash = "sha256:84d9dd047ffa80596e0f246e2eab0b391788b0503584e8945f2368256d2735ff"}, + {file = "idna-3.3.tar.gz", hash = "sha256:9d643ff0a55b762d5cdb124b8eaa99c66322e2157b69160bc32796e824360e6d"}, +] +importlib-metadata = [ + {file = "importlib_metadata-4.11.3-py3-none-any.whl", hash = "sha256:1208431ca90a8cca1a6b8af391bb53c1a2db74e5d1cef6ddced95d4b2062edc6"}, + {file = "importlib_metadata-4.11.3.tar.gz", hash = "sha256:ea4c597ebf37142f827b8f39299579e31685c31d3a438b59f469406afd0f2539"}, +] +importlib-resources = [ + {file = "importlib_resources-5.7.1-py3-none-any.whl", hash = "sha256:e447dc01619b1e951286f3929be820029d48c75eb25d265c28b92a16548212b8"}, + {file = "importlib_resources-5.7.1.tar.gz", hash = "sha256:b6062987dfc51f0fcb809187cffbd60f35df7acb4589091f154214af6d0d49d3"}, +] +iniconfig = [ + {file = "iniconfig-1.1.1-py2.py3-none-any.whl", hash = "sha256:011e24c64b7f47f6ebd835bb12a743f2fbe9a26d4cecaa7f53bc4f35ee9da8b3"}, + {file = "iniconfig-1.1.1.tar.gz", hash = "sha256:bc3af051d7d14b2ee5ef9969666def0cd1a000e121eaea580d4a313df4b37f32"}, +] +jsonschema = [ + {file = "jsonschema-4.5.1-py3-none-any.whl", hash = "sha256:71b5e39324422543546572954ce71c67728922c104902cb7ce252e522235b33f"}, + {file = "jsonschema-4.5.1.tar.gz", hash = "sha256:7c6d882619340c3347a1bf7315e147e6d3dae439033ae6383d6acb908c101dfc"}, +] +mypy = [ + {file = "mypy-0.960-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3a3e525cd76c2c4f90f1449fd034ba21fcca68050ff7c8397bb7dd25dd8b8248"}, + {file = "mypy-0.960-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7a76dc4f91e92db119b1be293892df8379b08fd31795bb44e0ff84256d34c251"}, + {file = "mypy-0.960-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ffdad80a92c100d1b0fe3d3cf1a4724136029a29afe8566404c0146747114382"}, + {file = "mypy-0.960-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7d390248ec07fa344b9f365e6ed9d205bd0205e485c555bed37c4235c868e9d5"}, + {file = "mypy-0.960-cp310-cp310-win_amd64.whl", hash = "sha256:925aa84369a07846b7f3b8556ccade1f371aa554f2bd4fb31cb97a24b73b036e"}, + {file = "mypy-0.960-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:239d6b2242d6c7f5822163ee082ef7a28ee02e7ac86c35593ef923796826a385"}, + {file = "mypy-0.960-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = 
"sha256:f1ba54d440d4feee49d8768ea952137316d454b15301c44403db3f2cb51af024"}, + {file = "mypy-0.960-cp36-cp36m-win_amd64.whl", hash = "sha256:cb7752b24528c118a7403ee955b6a578bfcf5879d5ee91790667c8ea511d2085"}, + {file = "mypy-0.960-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:826a2917c275e2ee05b7c7b736c1e6549a35b7ea5a198ca457f8c2ebea2cbecf"}, + {file = "mypy-0.960-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3eabcbd2525f295da322dff8175258f3fc4c3eb53f6d1929644ef4d99b92e72d"}, + {file = "mypy-0.960-cp37-cp37m-win_amd64.whl", hash = "sha256:f47322796c412271f5aea48381a528a613f33e0a115452d03ae35d673e6064f8"}, + {file = "mypy-0.960-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2c7f8bb9619290836a4e167e2ef1f2cf14d70e0bc36c04441e41487456561409"}, + {file = "mypy-0.960-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:fbfb873cf2b8d8c3c513367febde932e061a5f73f762896826ba06391d932b2a"}, + {file = "mypy-0.960-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:cc537885891382e08129d9862553b3d00d4be3eb15b8cae9e2466452f52b0117"}, + {file = "mypy-0.960-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:481f98c6b24383188c928f33dd2f0776690807e12e9989dd0419edd5c74aa53b"}, + {file = "mypy-0.960-cp38-cp38-win_amd64.whl", hash = "sha256:29dc94d9215c3eb80ac3c2ad29d0c22628accfb060348fd23d73abe3ace6c10d"}, + {file = "mypy-0.960-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:33d53a232bb79057f33332dbbb6393e68acbcb776d2f571ba4b1d50a2c8ba873"}, + {file = "mypy-0.960-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8d645e9e7f7a5da3ec3bbcc314ebb9bb22c7ce39e70367830eb3c08d0140b9ce"}, + {file = "mypy-0.960-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:85cf2b14d32b61db24ade8ac9ae7691bdfc572a403e3cb8537da936e74713275"}, + {file = "mypy-0.960-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a85a20b43fa69efc0b955eba1db435e2ffecb1ca695fe359768e0503b91ea89f"}, + {file = "mypy-0.960-cp39-cp39-win_amd64.whl", hash = "sha256:0ebfb3f414204b98c06791af37a3a96772203da60636e2897408517fcfeee7a8"}, + {file = "mypy-0.960-py3-none-any.whl", hash = "sha256:bfd4f6536bd384c27c392a8b8f790fd0ed5c0cf2f63fc2fed7bce56751d53026"}, + {file = "mypy-0.960.tar.gz", hash = "sha256:d4fccf04c1acf750babd74252e0f2db6bd2ac3aa8fe960797d9f3ef41cf2bfd4"}, +] +mypy-extensions = [ + {file = "mypy_extensions-0.4.3-py2.py3-none-any.whl", hash = "sha256:090fedd75945a69ae91ce1303b5824f428daf5a028d2f6ab8a299250a846f15d"}, + {file = "mypy_extensions-0.4.3.tar.gz", hash = "sha256:2d82818f5bb3e369420cb3c4060a7970edba416647068eb4c5343488a6c604a8"}, +] +nodeenv = [ + {file = "nodeenv-1.6.0-py2.py3-none-any.whl", hash = "sha256:621e6b7076565ddcacd2db0294c0381e01fd28945ab36bcf00f41c5daf63bef7"}, + {file = "nodeenv-1.6.0.tar.gz", hash = "sha256:3ef13ff90291ba2a4a7a4ff9a979b63ffdd00a464dbe04acf0ea6471517a4c2b"}, +] +packaging = [ + {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, + {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, +] +pep517 = [ + {file = "pep517-0.12.0-py2.py3-none-any.whl", hash = "sha256:dd884c326898e2c6e11f9e0b64940606a93eb10ea022a2e067959f3a110cf161"}, + {file = "pep517-0.12.0.tar.gz", hash = "sha256:931378d93d11b298cf511dd634cf5ea4cb249a28ef84160b3247ee9afb4e8ab0"}, +] +platformdirs = [ + {file = 
"platformdirs-2.5.2-py3-none-any.whl", hash = "sha256:027d8e83a2d7de06bbac4e5ef7e023c02b863d7ea5d079477e722bb41ab25788"}, + {file = "platformdirs-2.5.2.tar.gz", hash = "sha256:58c8abb07dcb441e6ee4b11d8df0ac856038f944ab98b7be6b27b2a3c7feef19"}, +] +pluggy = [ + {file = "pluggy-1.0.0-py2.py3-none-any.whl", hash = "sha256:74134bbf457f031a36d68416e1509f34bd5ccc019f0bcc952c7b909d06b37bd3"}, + {file = "pluggy-1.0.0.tar.gz", hash = "sha256:4224373bacce55f955a878bf9cfa763c1e360858e330072059e10bad68531159"}, +] +pre-commit = [ + {file = "pre_commit-2.19.0-py2.py3-none-any.whl", hash = "sha256:10c62741aa5704faea2ad69cb550ca78082efe5697d6f04e5710c3c229afdd10"}, + {file = "pre_commit-2.19.0.tar.gz", hash = "sha256:4233a1e38621c87d9dda9808c6606d7e7ba0e087cd56d3fe03202a01d2919615"}, +] +py = [ + {file = "py-1.11.0-py2.py3-none-any.whl", hash = "sha256:607c53218732647dff4acdfcd50cb62615cedf612e72d1724fb1a0cc6405b378"}, + {file = "py-1.11.0.tar.gz", hash = "sha256:51c75c4126074b472f746a24399ad32f6053d1b34b68d2fa41e558e6f4a98719"}, +] +pygments = [ + {file = "Pygments-2.12.0-py3-none-any.whl", hash = "sha256:dc9c10fb40944260f6ed4c688ece0cd2048414940f1cea51b8b226318411c519"}, + {file = "Pygments-2.12.0.tar.gz", hash = "sha256:5eb116118f9612ff1ee89ac96437bb6b49e8f04d8a13b514ba26f620208e26eb"}, +] +pyparsing = [ + {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, + {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, +] +pyrsistent = [ + {file = "pyrsistent-0.18.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:df46c854f490f81210870e509818b729db4488e1f30f2a1ce1698b2295a878d1"}, + {file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d45866ececf4a5fff8742c25722da6d4c9e180daa7b405dc0a2a2790d668c26"}, + {file = "pyrsistent-0.18.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4ed6784ceac462a7d6fcb7e9b663e93b9a6fb373b7f43594f9ff68875788e01e"}, + {file = "pyrsistent-0.18.1-cp310-cp310-win32.whl", hash = "sha256:e4f3149fd5eb9b285d6bfb54d2e5173f6a116fe19172686797c056672689daf6"}, + {file = "pyrsistent-0.18.1-cp310-cp310-win_amd64.whl", hash = "sha256:636ce2dc235046ccd3d8c56a7ad54e99d5c1cd0ef07d9ae847306c91d11b5fec"}, + {file = "pyrsistent-0.18.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:e92a52c166426efbe0d1ec1332ee9119b6d32fc1f0bbfd55d5c1088070e7fc1b"}, + {file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d7a096646eab884bf8bed965bad63ea327e0d0c38989fc83c5ea7b8a87037bfc"}, + {file = "pyrsistent-0.18.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cdfd2c361b8a8e5d9499b9082b501c452ade8bbf42aef97ea04854f4a3f43b22"}, + {file = "pyrsistent-0.18.1-cp37-cp37m-win32.whl", hash = "sha256:7ec335fc998faa4febe75cc5268a9eac0478b3f681602c1f27befaf2a1abe1d8"}, + {file = "pyrsistent-0.18.1-cp37-cp37m-win_amd64.whl", hash = "sha256:6455fc599df93d1f60e1c5c4fe471499f08d190d57eca040c0ea182301321286"}, + {file = "pyrsistent-0.18.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fd8da6d0124efa2f67d86fa70c851022f87c98e205f0594e1fae044e7119a5a6"}, + {file = "pyrsistent-0.18.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7bfe2388663fd18bd8ce7db2c91c7400bf3e1a9e8bd7d63bf7e77d39051b85ec"}, + {file = 
"pyrsistent-0.18.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0e3e1fcc45199df76053026a51cc59ab2ea3fc7c094c6627e93b7b44cdae2c8c"}, + {file = "pyrsistent-0.18.1-cp38-cp38-win32.whl", hash = "sha256:b568f35ad53a7b07ed9b1b2bae09eb15cdd671a5ba5d2c66caee40dbf91c68ca"}, + {file = "pyrsistent-0.18.1-cp38-cp38-win_amd64.whl", hash = "sha256:d1b96547410f76078eaf66d282ddca2e4baae8964364abb4f4dcdde855cd123a"}, + {file = "pyrsistent-0.18.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f87cc2863ef33c709e237d4b5f4502a62a00fab450c9e020892e8e2ede5847f5"}, + {file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6bc66318fb7ee012071b2792024564973ecc80e9522842eb4e17743604b5e045"}, + {file = "pyrsistent-0.18.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:914474c9f1d93080338ace89cb2acee74f4f666fb0424896fcfb8d86058bf17c"}, + {file = "pyrsistent-0.18.1-cp39-cp39-win32.whl", hash = "sha256:1b34eedd6812bf4d33814fca1b66005805d3640ce53140ab8bbb1e2651b0d9bc"}, + {file = "pyrsistent-0.18.1-cp39-cp39-win_amd64.whl", hash = "sha256:e24a828f57e0c337c8d8bb9f6b12f09dfdf0273da25fda9e314f0b684b415a07"}, + {file = "pyrsistent-0.18.1.tar.gz", hash = "sha256:d4d61f8b993a7255ba714df3aca52700f8125289f84f704cf80916517c46eb96"}, +] +pytest = [ + {file = "pytest-7.1.2-py3-none-any.whl", hash = "sha256:13d0e3ccfc2b6e26be000cb6568c832ba67ba32e719443bfe725814d3c42433c"}, + {file = "pytest-7.1.2.tar.gz", hash = "sha256:a06a0425453864a270bc45e71f783330a7428defb4230fb5e6a731fde06ecd45"}, +] +pytest-cov = [ + {file = "pytest-cov-3.0.0.tar.gz", hash = "sha256:e7f0f5b1617d2210a2cabc266dfe2f4c75a8d32fb89eafb7ad9d06f6d076d470"}, + {file = "pytest_cov-3.0.0-py3-none-any.whl", hash = "sha256:578d5d15ac4a25e5f961c938b85a05b09fdaae9deef3bb6de9a6e766622ca7a6"}, +] +pytest-mock = [ + {file = "pytest-mock-3.7.0.tar.gz", hash = "sha256:5112bd92cc9f186ee96e1a92efc84969ea494939c3aead39c50f421c4cc69534"}, + {file = "pytest_mock-3.7.0-py3-none-any.whl", hash = "sha256:6cff27cec936bf81dc5ee87f07132b807bcda51106b5ec4b90a04331cba76231"}, +] +pyyaml = [ + {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, + {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, + {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, + {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = "sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, + {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, + {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, + {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, + {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, + {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, + {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, + {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, + {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, + {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, + {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, + {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, + {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, + {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", 
hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, + {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, +] +requests = [ + {file = "requests-2.27.1-py2.py3-none-any.whl", hash = "sha256:f22fa1e554c9ddfd16e6e41ac79759e17be9e492b3587efa038054674760e72d"}, + {file = "requests-2.27.1.tar.gz", hash = "sha256:68d7c56fd5a8999887728ef304a6d12edc7be74f1cfa47714fc8b414525c9a61"}, +] +rich = [ + {file = "rich-12.4.1-py3-none-any.whl", hash = "sha256:d13c6c90c42e24eb7ce660db397e8c398edd58acb7f92a2a88a95572b838aaa4"}, + {file = "rich-12.4.1.tar.gz", hash = "sha256:d239001c0fb7de985e21ec9a4bb542b5150350330bbc1849f835b9cbc8923b91"}, +] +six = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] +toml = [ + {file = "toml-0.10.2-py2.py3-none-any.whl", hash = "sha256:806143ae5bfb6a3c6e736a764057db0e6a0e05e338b5630894a5f779cabb4f9b"}, + {file = "toml-0.10.2.tar.gz", hash = "sha256:b3bda1d108d5dd99f4a20d24d9c348e91c4db7ab1b749200bded2f839ccbe68f"}, +] +tomli = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] +tox = [ + {file = "tox-3.25.0-py2.py3-none-any.whl", hash = "sha256:0805727eb4d6b049de304977dfc9ce315a1938e6619c3ab9f38682bb04662a5a"}, + {file = "tox-3.25.0.tar.gz", hash = "sha256:37888f3092aa4e9f835fc8cc6dadbaaa0782651c41ef359e3a5743fcb0308160"}, +] +typed-ast = [ + {file = "typed_ast-1.5.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:669dd0c4167f6f2cd9f57041e03c3c2ebf9063d0757dc89f79ba1daa2bfca9d4"}, + {file = "typed_ast-1.5.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:211260621ab1cd7324e0798d6be953d00b74e0428382991adfddb352252f1d62"}, + {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:267e3f78697a6c00c689c03db4876dd1efdfea2f251a5ad6555e82a26847b4ac"}, + {file = "typed_ast-1.5.4-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:c542eeda69212fa10a7ada75e668876fdec5f856cd3d06829e6aa64ad17c8dfe"}, + {file = "typed_ast-1.5.4-cp310-cp310-win_amd64.whl", hash = "sha256:a9916d2bb8865f973824fb47436fa45e1ebf2efd920f2b9f99342cb7fab93f72"}, + {file = "typed_ast-1.5.4-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:79b1e0869db7c830ba6a981d58711c88b6677506e648496b1f64ac7d15633aec"}, + {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a94d55d142c9265f4ea46fab70977a1944ecae359ae867397757d836ea5a3f47"}, + {file = "typed_ast-1.5.4-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:183afdf0ec5b1b211724dfef3d2cad2d767cbefac291f24d69b00546c1837fb6"}, + {file = "typed_ast-1.5.4-cp36-cp36m-win_amd64.whl", hash = "sha256:639c5f0b21776605dd6c9dbe592d5228f021404dafd377e2b7ac046b0349b1a1"}, + {file = "typed_ast-1.5.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:cf4afcfac006ece570e32d6fa90ab74a17245b83dfd6655a6f68568098345ff6"}, + {file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ed855bbe3eb3715fca349c80174cfcfd699c2f9de574d40527b8429acae23a66"}, + 
{file = "typed_ast-1.5.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:6778e1b2f81dfc7bc58e4b259363b83d2e509a65198e85d5700dfae4c6c8ff1c"}, + {file = "typed_ast-1.5.4-cp37-cp37m-win_amd64.whl", hash = "sha256:0261195c2062caf107831e92a76764c81227dae162c4f75192c0d489faf751a2"}, + {file = "typed_ast-1.5.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2efae9db7a8c05ad5547d522e7dbe62c83d838d3906a3716d1478b6c1d61388d"}, + {file = "typed_ast-1.5.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7d5d014b7daa8b0bf2eaef684295acae12b036d79f54178b92a2b6a56f92278f"}, + {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:370788a63915e82fd6f212865a596a0fefcbb7d408bbbb13dea723d971ed8bdc"}, + {file = "typed_ast-1.5.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4e964b4ff86550a7a7d56345c7864b18f403f5bd7380edf44a3c1fb4ee7ac6c6"}, + {file = "typed_ast-1.5.4-cp38-cp38-win_amd64.whl", hash = "sha256:683407d92dc953c8a7347119596f0b0e6c55eb98ebebd9b23437501b28dcbb8e"}, + {file = "typed_ast-1.5.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:4879da6c9b73443f97e731b617184a596ac1235fe91f98d279a7af36c796da35"}, + {file = "typed_ast-1.5.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3e123d878ba170397916557d31c8f589951e353cc95fb7f24f6bb69adc1a8a97"}, + {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ebd9d7f80ccf7a82ac5f88c521115cc55d84e35bf8b446fcd7836eb6b98929a3"}, + {file = "typed_ast-1.5.4-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98f80dee3c03455e92796b58b98ff6ca0b2a6f652120c263efdba4d6c5e58f72"}, + {file = "typed_ast-1.5.4-cp39-cp39-win_amd64.whl", hash = "sha256:0fdbcf2fef0ca421a3f5912555804296f0b0960f0418c440f5d6d3abb549f3e1"}, + {file = "typed_ast-1.5.4.tar.gz", hash = "sha256:39e21ceb7388e4bb37f4c679d72707ed46c2fbf2a5609b8b8ebc4b067d977df2"}, +] +types-jsonschema = [ + {file = "types-jsonschema-4.4.4.tar.gz", hash = "sha256:d03f0c1a97ff06dda9535dfa51916a98f38bf40d6828ef4d93bc40708effe507"}, + {file = "types_jsonschema-4.4.4-py3-none-any.whl", hash = "sha256:294d2de9ea3564fbec6c56153e84d1f3f7d9b2ada36e183d88a63c126da7bc3d"}, +] +types-setuptools = [ + {file = "types-setuptools-57.4.14.tar.gz", hash = "sha256:df02fe1dd244f58cf4e67cfc3d0a97930a2d61a72dd89f21d81c71017cd83f9a"}, + {file = "types_setuptools-57.4.14-py3-none-any.whl", hash = "sha256:828f7e7e51e157876f47c80518b23ba0c3c36aa8081efd39d5d39f393938aec9"}, +] +typing-extensions = [ + {file = "typing_extensions-4.2.0-py3-none-any.whl", hash = "sha256:6657594ee297170d19f67d55c05852a874e7eb634f4f753dbd667855e07c1708"}, + {file = "typing_extensions-4.2.0.tar.gz", hash = "sha256:f1c24655a0da0d1b67f07e17a5e6b2a105894e6824b92096378bb3668ef02376"}, +] +urllib3 = [ + {file = "urllib3-1.26.9-py2.py3-none-any.whl", hash = "sha256:44ece4d53fb1706f667c9bd1c648f5469a2ec925fcf3a776667042d645472c14"}, + {file = "urllib3-1.26.9.tar.gz", hash = "sha256:aabaf16477806a5e1dd19aa41f8c2b7950dd3c746362d7e3223dbe6de6ac448e"}, +] +vendoring = [ + {file = "vendoring-1.2.0-py2.py3-none-any.whl", hash = "sha256:35b5fca683264e69e851a7580bb6a6f9848af024ffc8382ed5491bcfa55750c6"}, + {file = "vendoring-1.2.0.tar.gz", hash = "sha256:6340a84bf542222c96f22ebc3cb87e4d86932dc04bc8d446e38285594702c00e"}, +] +virtualenv = [ + {file = "virtualenv-20.14.1-py2.py3-none-any.whl", hash = 
"sha256:e617f16e25b42eb4f6e74096b9c9e37713cf10bf30168fb4a739f3fa8f898a3a"}, + {file = "virtualenv-20.14.1.tar.gz", hash = "sha256:ef589a79795589aada0c1c5b319486797c03b67ac3984c48c669c0e4f50df3a5"}, +] +zipp = [ + {file = "zipp-3.8.0-py3-none-any.whl", hash = "sha256:c4f6e5bbf48e74f7a38e7cc5b0480ff42b0ae5178957d564d18932525d5cf099"}, + {file = "zipp-3.8.0.tar.gz", hash = "sha256:56bf8aadb83c24db6c4b577e13de374ccfb67da2078beba1d037c17980bf43ad"}, +] diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..b819fb3 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,121 @@ +[tool.poetry] +name = "poetry-core" +version = "1.4.0" +description = "Poetry PEP 517 Build Backend" +authors = ["Sébastien Eustace "] + +license = "MIT" + +readme = "README.md" + +homepage = "https://github.com/python-poetry/poetry-core" +repository = "https://github.com/python-poetry/poetry-core" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +packages = [ + { include = "poetry", from = "src" }, +] +include = [ + { path = "tests", format = "sdist" }, +] +exclude = [ + "**/*.pyc", + "**/*.pyi", +] + +[tool.poetry.build] +generate-setup-file = false + +[tool.poetry.urls] +"Bug Tracker" = "https://github.com/python-poetry/poetry/issues" + +[tool.poetry.dependencies] +python = "^3.7" + +# required for compatibility +importlib-metadata = {version = ">=1.7.0", python = "<3.8"} + +[tool.poetry.dev-dependencies] +pre-commit = "^2.15.0" +pyrsistent = "^0.18.0" +pytest = "^7.1.2" +pytest-cov = "^3.0.0" +pytest-mock = "^3.5" +tox = "^3.0" +vendoring = {version = "^1.0", python = "^3.8"} +build = "^0.7.0" +mypy = ">=0.960" +types-jsonschema = ">=4.4.4" +types-setuptools = ">=57.4.14" + +[tool.black] +line-length = 88 +preview = true +include = '\.pyi?$' +extend-exclude = "src/poetry/core/_vendor/*" + +[tool.isort] +profile = "black" +force_single_line = true +atomic = true +include_trailing_comma = true +lines_after_imports = 2 +lines_between_types = 1 +use_parentheses = true +skip_glob = ["*/setup.py", "*/poetry/core/_vendor/*"] +filter_files = true + +known_first_party = "poetry.core" +known_third_party = ["poetry.core._vendor"] + +[tool.mypy] +strict = true +explicit_package_bases = true +namespace_packages = true +show_error_codes = true +enable_error_code = [ + "ignore-without-code", + "redundant-expr", + "truthy-bool", +] +mypy_path = "src" +files = "src, tests" +exclude = "(?x)(^tests/.*/fixtures | ^src/poetry/core/_vendor)" + +[[tool.mypy.overrides]] +module = [ + 'lark.*', + 'tomlkit.*', + 'virtualenv.*', +] +ignore_missing_imports = true + +[tool.vendoring] +destination = "src/poetry/core/_vendor/" +requirements = "src/poetry/core/_vendor/vendor.txt" +namespace = "" + +protected-files = ["vendor.txt"] +patches-dir = "vendors/patches" + +[tool.vendoring.transformations] +drop = [ + "bin/", + "*.so", + "typing.*", + "*/tests/" +] + +[tool.vendoring.license.fallback-urls] +pyrsistent = "https://raw.githubusercontent.com/tobgu/pyrsistent/master/LICENSE.mit" + +[build-system] +requires = [] +build-backend = "poetry.core.masonry.api" +backend-path = ["src"] diff --git a/src/poetry/core/__init__.py b/src/poetry/core/__init__.py new file mode 100644 index 0000000..a061a55 --- /dev/null +++ b/src/poetry/core/__init__.py @@ -0,0 +1,15 @@ +from __future__ import annotations + +import sys + +from pathlib import Path + + +# this cannot presently be replaced with 
importlib.metadata.version as when building +# itself, poetry-core is not available as an installed distribution. +__version__ = "1.4.0" + +__vendor_site__ = (Path(__file__).parent / "_vendor").as_posix() + +if __vendor_site__ not in sys.path: + sys.path.insert(0, __vendor_site__) diff --git a/src/poetry/core/_vendor/_pyrsistent_version.py b/src/poetry/core/_vendor/_pyrsistent_version.py new file mode 100644 index 0000000..5daae67 --- /dev/null +++ b/src/poetry/core/_vendor/_pyrsistent_version.py @@ -0,0 +1 @@ +__version__ = '0.19.2' diff --git a/src/poetry/core/_vendor/attr/__init__.py b/src/poetry/core/_vendor/attr/__init__.py new file mode 100644 index 0000000..386305d --- /dev/null +++ b/src/poetry/core/_vendor/attr/__init__.py @@ -0,0 +1,79 @@ +# SPDX-License-Identifier: MIT + + +import sys + +from functools import partial + +from . import converters, exceptions, filters, setters, validators +from ._cmp import cmp_using +from ._config import get_run_validators, set_run_validators +from ._funcs import asdict, assoc, astuple, evolve, has, resolve_types +from ._make import ( + NOTHING, + Attribute, + Factory, + attrib, + attrs, + fields, + fields_dict, + make_class, + validate, +) +from ._version_info import VersionInfo + + +__version__ = "22.1.0" +__version_info__ = VersionInfo._from_version_string(__version__) + +__title__ = "attrs" +__description__ = "Classes Without Boilerplate" +__url__ = "https://www.attrs.org/" +__uri__ = __url__ +__doc__ = __description__ + " <" + __uri__ + ">" + +__author__ = "Hynek Schlawack" +__email__ = "hs@ox.cx" + +__license__ = "MIT" +__copyright__ = "Copyright (c) 2015 Hynek Schlawack" + + +s = attributes = attrs +ib = attr = attrib +dataclass = partial(attrs, auto_attribs=True) # happy Easter ;) + +__all__ = [ + "Attribute", + "Factory", + "NOTHING", + "asdict", + "assoc", + "astuple", + "attr", + "attrib", + "attributes", + "attrs", + "cmp_using", + "converters", + "evolve", + "exceptions", + "fields", + "fields_dict", + "filters", + "get_run_validators", + "has", + "ib", + "make_class", + "resolve_types", + "s", + "set_run_validators", + "setters", + "validate", + "validators", +] + +if sys.version_info[:2] >= (3, 6): + from ._next_gen import define, field, frozen, mutable # noqa: F401 + + __all__.extend(("define", "field", "frozen", "mutable")) diff --git a/src/poetry/core/_vendor/attr/_cmp.py b/src/poetry/core/_vendor/attr/_cmp.py new file mode 100644 index 0000000..81b99e4 --- /dev/null +++ b/src/poetry/core/_vendor/attr/_cmp.py @@ -0,0 +1,155 @@ +# SPDX-License-Identifier: MIT + + +import functools +import types + +from ._make import _make_ne + + +_operation_names = {"eq": "==", "lt": "<", "le": "<=", "gt": ">", "ge": ">="} + + +def cmp_using( + eq=None, + lt=None, + le=None, + gt=None, + ge=None, + require_same_type=True, + class_name="Comparable", +): + """ + Create a class that can be passed into `attr.ib`'s ``eq``, ``order``, and + ``cmp`` arguments to customize field comparison. + + The resulting class will have a full set of ordering methods if + at least one of ``{lt, le, gt, ge}`` and ``eq`` are provided. + + :param Optional[callable] eq: `callable` used to evaluate equality + of two objects. + :param Optional[callable] lt: `callable` used to evaluate whether + one object is less than another object. + :param Optional[callable] le: `callable` used to evaluate whether + one object is less than or equal to another object. + :param Optional[callable] gt: `callable` used to evaluate whether + one object is greater than another object. 
+ :param Optional[callable] ge: `callable` used to evaluate whether + one object is greater than or equal to another object. + + :param bool require_same_type: When `True`, equality and ordering methods + will return `NotImplemented` if objects are not of the same type. + + :param Optional[str] class_name: Name of class. Defaults to 'Comparable'. + + See `comparison` for more details. + + .. versionadded:: 21.1.0 + """ + + body = { + "__slots__": ["value"], + "__init__": _make_init(), + "_requirements": [], + "_is_comparable_to": _is_comparable_to, + } + + # Add operations. + num_order_functions = 0 + has_eq_function = False + + if eq is not None: + has_eq_function = True + body["__eq__"] = _make_operator("eq", eq) + body["__ne__"] = _make_ne() + + if lt is not None: + num_order_functions += 1 + body["__lt__"] = _make_operator("lt", lt) + + if le is not None: + num_order_functions += 1 + body["__le__"] = _make_operator("le", le) + + if gt is not None: + num_order_functions += 1 + body["__gt__"] = _make_operator("gt", gt) + + if ge is not None: + num_order_functions += 1 + body["__ge__"] = _make_operator("ge", ge) + + type_ = types.new_class( + class_name, (object,), {}, lambda ns: ns.update(body) + ) + + # Add same type requirement. + if require_same_type: + type_._requirements.append(_check_same_type) + + # Add total ordering if at least one operation was defined. + if 0 < num_order_functions < 4: + if not has_eq_function: + # functools.total_ordering requires __eq__ to be defined, + # so raise early error here to keep a nice stack. + raise ValueError( + "eq must be define is order to complete ordering from " + "lt, le, gt, ge." + ) + type_ = functools.total_ordering(type_) + + return type_ + + +def _make_init(): + """ + Create __init__ method. + """ + + def __init__(self, value): + """ + Initialize object with *value*. + """ + self.value = value + + return __init__ + + +def _make_operator(name, func): + """ + Create operator method. + """ + + def method(self, other): + if not self._is_comparable_to(other): + return NotImplemented + + result = func(self.value, other.value) + if result is NotImplemented: + return NotImplemented + + return result + + method.__name__ = "__%s__" % (name,) + method.__doc__ = "Return a %s b. Computed by attrs." % ( + _operation_names[name], + ) + + return method + + +def _is_comparable_to(self, other): + """ + Check whether `other` is comparable to `self`. + """ + for func in self._requirements: + if not func(self, other): + return False + return True + + +def _check_same_type(self, other): + """ + Return True if *self* and *other* are of the same type, False otherwise. + """ + return other.value.__class__ is self.value.__class__ diff --git a/src/poetry/core/_vendor/attr/_compat.py b/src/poetry/core/_vendor/attr/_compat.py new file mode 100644 index 0000000..5826493 --- /dev/null +++ b/src/poetry/core/_vendor/attr/_compat.py @@ -0,0 +1,185 @@ +# SPDX-License-Identifier: MIT + + +import inspect +import platform +import sys +import threading +import types +import warnings + +from collections.abc import Mapping, Sequence # noqa + + +PYPY = platform.python_implementation() == "PyPy" +PY36 = sys.version_info[:2] >= (3, 6) +HAS_F_STRINGS = PY36 +PY310 = sys.version_info[:2] >= (3, 10) + + +if PYPY or PY36: + ordered_dict = dict +else: + from collections import OrderedDict + + ordered_dict = OrderedDict + + +def just_warn(*args, **kw): + warnings.warn( + "Running interpreter doesn't sufficiently support code object " + "introspection. 
Some features like bare super() or accessing " + "__class__ will not work with slotted classes.", + RuntimeWarning, + stacklevel=2, + ) + + +class _AnnotationExtractor: + """ + Extract type annotations from a callable, returning None whenever there + is none. + """ + + __slots__ = ["sig"] + + def __init__(self, callable): + try: + self.sig = inspect.signature(callable) + except (ValueError, TypeError): # inspect failed + self.sig = None + + def get_first_param_type(self): + """ + Return the type annotation of the first argument if it's not empty. + """ + if not self.sig: + return None + + params = list(self.sig.parameters.values()) + if params and params[0].annotation is not inspect.Parameter.empty: + return params[0].annotation + + return None + + def get_return_type(self): + """ + Return the return type if it's not empty. + """ + if ( + self.sig + and self.sig.return_annotation is not inspect.Signature.empty + ): + return self.sig.return_annotation + + return None + + +def make_set_closure_cell(): + """Return a function of two arguments (cell, value) which sets + the value stored in the closure cell `cell` to `value`. + """ + # pypy makes this easy. (It also supports the logic below, but + # why not do the easy/fast thing?) + if PYPY: + + def set_closure_cell(cell, value): + cell.__setstate__((value,)) + + return set_closure_cell + + # Otherwise gotta do it the hard way. + + # Create a function that will set its first cellvar to `value`. + def set_first_cellvar_to(value): + x = value + return + + # This function will be eliminated as dead code, but + # not before its reference to `x` forces `x` to be + # represented as a closure cell rather than a local. + def force_x_to_be_a_cell(): # pragma: no cover + return x + + try: + # Extract the code object and make sure our assumptions about + # the closure behavior are correct. + co = set_first_cellvar_to.__code__ + if co.co_cellvars != ("x",) or co.co_freevars != (): + raise AssertionError # pragma: no cover + + # Convert this code object to a code object that sets the + # function's first _freevar_ (not cellvar) to the argument. + if sys.version_info >= (3, 8): + + def set_closure_cell(cell, value): + cell.cell_contents = value + + else: + args = [co.co_argcount] + args.append(co.co_kwonlyargcount) + args.extend( + [ + co.co_nlocals, + co.co_stacksize, + co.co_flags, + co.co_code, + co.co_consts, + co.co_names, + co.co_varnames, + co.co_filename, + co.co_name, + co.co_firstlineno, + co.co_lnotab, + # These two arguments are reversed: + co.co_cellvars, + co.co_freevars, + ] + ) + set_first_freevar_code = types.CodeType(*args) + + def set_closure_cell(cell, value): + # Create a function using the set_first_freevar_code, + # whose first closure cell is `cell`. Calling it will + # change the value of that cell. + setter = types.FunctionType( + set_first_freevar_code, {}, "setter", (), (cell,) + ) + # And call it to set the cell. + setter(value) + + # Make sure it works on this interpreter: + def make_func_with_cell(): + x = None + + def func(): + return x # pragma: no cover + + return func + + cell = make_func_with_cell().__closure__[0] + set_closure_cell(cell, 100) + if cell.cell_contents != 100: + raise AssertionError # pragma: no cover + + except Exception: + return just_warn + else: + return set_closure_cell + + +set_closure_cell = make_set_closure_cell() + +# Thread-local global to track attrs instances which are already being repr'd. 
+# This is needed because there is no other (thread-safe) way to pass info +# about the instances that are already being repr'd through the call stack +# in order to ensure we don't perform infinite recursion. +# +# For instance, if an instance contains a dict which contains that instance, +# we need to know that we're already repr'ing the outside instance from within +# the dict's repr() call. +# +# This lives here rather than in _make.py so that the functions in _make.py +# don't have a direct reference to the thread-local in their globals dict. +# If they have such a reference, it breaks cloudpickle. +repr_context = threading.local() diff --git a/src/poetry/core/_vendor/attr/_config.py b/src/poetry/core/_vendor/attr/_config.py new file mode 100644 index 0000000..96d4200 --- /dev/null +++ b/src/poetry/core/_vendor/attr/_config.py @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: MIT + + +__all__ = ["set_run_validators", "get_run_validators"] + +_run_validators = True + + +def set_run_validators(run): + """ + Set whether or not validators are run. By default, they are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.set_disabled()` + instead. + """ + if not isinstance(run, bool): + raise TypeError("'run' must be bool.") + global _run_validators + _run_validators = run + + +def get_run_validators(): + """ + Return whether or not validators are run. + + .. deprecated:: 21.3.0 It will not be removed, but it also will not be + moved to new ``attrs`` namespace. Use `attrs.validators.get_disabled()` + instead. + """ + return _run_validators diff --git a/src/poetry/core/_vendor/attr/_funcs.py b/src/poetry/core/_vendor/attr/_funcs.py new file mode 100644 index 0000000..a982d7c --- /dev/null +++ b/src/poetry/core/_vendor/attr/_funcs.py @@ -0,0 +1,420 @@ +# SPDX-License-Identifier: MIT + + +import copy + +from ._make import NOTHING, _obj_setattr, fields +from .exceptions import AttrsAttributeNotFoundError + + +def asdict( + inst, + recurse=True, + filter=None, + dict_factory=dict, + retain_collection_types=False, + value_serializer=None, +): + """ + Return the ``attrs`` attribute values of *inst* as a dict. + + Optionally recurse into other ``attrs``-decorated classes. + + :param inst: Instance of an ``attrs``-decorated class. + :param bool recurse: Recurse into classes that are also + ``attrs``-decorated. + :param callable filter: A callable whose return code determines whether an + attribute or element is included (``True``) or dropped (``False``). Is + called with the `attrs.Attribute` as the first argument and the + value as the second argument. + :param callable dict_factory: A callable to produce dictionaries from. For + example, to produce ordered dictionaries instead of normal Python + dictionaries, pass in ``collections.OrderedDict``. + :param bool retain_collection_types: Do not convert to ``list`` when + encountering an attribute whose type is ``tuple`` or ``set``. Only + meaningful if ``recurse`` is ``True``. + :param Optional[callable] value_serializer: A hook that is called for every + attribute or dict key/value. It receives the current instance, field + and value and must return the (updated) value. The hook is run *after* + the optional *filter* has been applied. + + :rtype: return type of *dict_factory* + + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. versionadded:: 16.0.0 *dict_factory* + .. versionadded:: 16.1.0 *retain_collection_types* + .. 
versionadded:: 20.3.0 *value_serializer* + .. versionadded:: 21.3.0 If a dict has a collection for a key, it is + serialized as a tuple. + """ + attrs = fields(inst.__class__) + rv = dict_factory() + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + + if value_serializer is not None: + v = value_serializer(inst, a, v) + + if recurse is True: + if has(v.__class__): + rv[a.name] = asdict( + v, + recurse=True, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + elif isinstance(v, (tuple, list, set, frozenset)): + cf = v.__class__ if retain_collection_types is True else list + rv[a.name] = cf( + [ + _asdict_anything( + i, + is_key=False, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + for i in v + ] + ) + elif isinstance(v, dict): + df = dict_factory + rv[a.name] = df( + ( + _asdict_anything( + kk, + is_key=True, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + _asdict_anything( + vv, + is_key=False, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + ) + for kk, vv in v.items() + ) + else: + rv[a.name] = v + else: + rv[a.name] = v + return rv + + +def _asdict_anything( + val, + is_key, + filter, + dict_factory, + retain_collection_types, + value_serializer, +): + """ + ``asdict`` only works on attrs instances, this works on anything. + """ + if getattr(val.__class__, "__attrs_attrs__", None) is not None: + # Attrs class. + rv = asdict( + val, + recurse=True, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + elif isinstance(val, (tuple, list, set, frozenset)): + if retain_collection_types is True: + cf = val.__class__ + elif is_key: + cf = tuple + else: + cf = list + + rv = cf( + [ + _asdict_anything( + i, + is_key=False, + filter=filter, + dict_factory=dict_factory, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ) + for i in val + ] + ) + elif isinstance(val, dict): + df = dict_factory + rv = df( + ( + _asdict_anything( + kk, + is_key=True, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + _asdict_anything( + vv, + is_key=False, + filter=filter, + dict_factory=df, + retain_collection_types=retain_collection_types, + value_serializer=value_serializer, + ), + ) + for kk, vv in val.items() + ) + else: + rv = val + if value_serializer is not None: + rv = value_serializer(None, None, rv) + + return rv + + +def astuple( + inst, + recurse=True, + filter=None, + tuple_factory=tuple, + retain_collection_types=False, +): + """ + Return the ``attrs`` attribute values of *inst* as a tuple. + + Optionally recurse into other ``attrs``-decorated classes. + + :param inst: Instance of an ``attrs``-decorated class. + :param bool recurse: Recurse into classes that are also + ``attrs``-decorated. + :param callable filter: A callable whose return code determines whether an + attribute or element is included (``True``) or dropped (``False``). Is + called with the `attrs.Attribute` as the first argument and the + value as the second argument. + :param callable tuple_factory: A callable to produce tuples from. 
For + example, to produce lists instead of tuples. + :param bool retain_collection_types: Do not convert to ``list`` + or ``dict`` when encountering an attribute which type is + ``tuple``, ``dict`` or ``set``. Only meaningful if ``recurse`` is + ``True``. + + :rtype: return type of *tuple_factory* + + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. versionadded:: 16.2.0 + """ + attrs = fields(inst.__class__) + rv = [] + retain = retain_collection_types # Very long. :/ + for a in attrs: + v = getattr(inst, a.name) + if filter is not None and not filter(a, v): + continue + if recurse is True: + if has(v.__class__): + rv.append( + astuple( + v, + recurse=True, + filter=filter, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + ) + elif isinstance(v, (tuple, list, set, frozenset)): + cf = v.__class__ if retain is True else list + rv.append( + cf( + [ + astuple( + j, + recurse=True, + filter=filter, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(j.__class__) + else j + for j in v + ] + ) + ) + elif isinstance(v, dict): + df = v.__class__ if retain is True else dict + rv.append( + df( + ( + astuple( + kk, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(kk.__class__) + else kk, + astuple( + vv, + tuple_factory=tuple_factory, + retain_collection_types=retain, + ) + if has(vv.__class__) + else vv, + ) + for kk, vv in v.items() + ) + ) + else: + rv.append(v) + else: + rv.append(v) + + return rv if tuple_factory is list else tuple_factory(rv) + + +def has(cls): + """ + Check whether *cls* is a class with ``attrs`` attributes. + + :param type cls: Class to introspect. + :raise TypeError: If *cls* is not a class. + + :rtype: bool + """ + return getattr(cls, "__attrs_attrs__", None) is not None + + +def assoc(inst, **changes): + """ + Copy *inst* and apply *changes*. + + :param inst: Instance of a class with ``attrs`` attributes. + :param changes: Keyword changes in the new copy. + + :return: A copy of inst with *changes* incorporated. + + :raise attr.exceptions.AttrsAttributeNotFoundError: If *attr_name* couldn't + be found on *cls*. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. deprecated:: 17.1.0 + Use `attrs.evolve` instead if you can. + This function will not be removed du to the slightly different approach + compared to `attrs.evolve`. + """ + import warnings + + warnings.warn( + "assoc is deprecated and will be removed after 2018/01.", + DeprecationWarning, + stacklevel=2, + ) + new = copy.copy(inst) + attrs = fields(inst.__class__) + for k, v in changes.items(): + a = getattr(attrs, k, NOTHING) + if a is NOTHING: + raise AttrsAttributeNotFoundError( + "{k} is not an attrs attribute on {cl}.".format( + k=k, cl=new.__class__ + ) + ) + _obj_setattr(new, k, v) + return new + + +def evolve(inst, **changes): + """ + Create a new instance, based on *inst* with *changes* applied. + + :param inst: Instance of a class with ``attrs`` attributes. + :param changes: Keyword changes in the new copy. + + :return: A copy of inst with *changes* incorporated. + + :raise TypeError: If *attr_name* couldn't be found in the class + ``__init__``. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + .. versionadded:: 17.1.0 + """ + cls = inst.__class__ + attrs = fields(cls) + for a in attrs: + if not a.init: + continue + attr_name = a.name # To deal with private attributes. 
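+ # For example, a private attribute ``_id`` is exposed by the generated
+ # ``__init__`` as the ``id`` argument (the name is illustrative), so the
+ # leading underscore is stripped before looking the name up in *changes*.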
+ init_name = attr_name if attr_name[0] != "_" else attr_name[1:] + if init_name not in changes: + changes[init_name] = getattr(inst, attr_name) + + return cls(**changes) + + +def resolve_types(cls, globalns=None, localns=None, attribs=None): + """ + Resolve any strings and forward annotations in type annotations. + + This is only required if you need concrete types in `Attribute`'s *type* + field. In other words, you don't need to resolve your types if you only + use them for static type checking. + + With no arguments, names will be looked up in the module in which the class + was created. If this is not what you want, e.g. if the name only exists + inside a method, you may pass *globalns* or *localns* to specify other + dictionaries in which to look up these names. See the docs of + `typing.get_type_hints` for more details. + + :param type cls: Class to resolve. + :param Optional[dict] globalns: Dictionary containing global variables. + :param Optional[dict] localns: Dictionary containing local variables. + :param Optional[list] attribs: List of attribs for the given class. + This is necessary when calling from inside a ``field_transformer`` + since *cls* is not an ``attrs`` class yet. + + :raise TypeError: If *cls* is not a class. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class and you didn't pass any attribs. + :raise NameError: If types cannot be resolved because of missing variables. + + :returns: *cls* so you can use this function also as a class decorator. + Please note that you have to apply it **after** `attrs.define`. That + means the decorator has to come in the line **before** `attrs.define`. + + .. versionadded:: 20.1.0 + .. versionadded:: 21.1.0 *attribs* + + """ + # Since calling get_type_hints is expensive we cache whether we've + # done it already. + if getattr(cls, "__attrs_types_resolved__", None) != cls: + import typing + + hints = typing.get_type_hints(cls, globalns=globalns, localns=localns) + for field in fields(cls) if attribs is None else attribs: + if field.name in hints: + # Since fields have been frozen we must work around it. + _obj_setattr(field, "type", hints[field.name]) + # We store the class we resolved so that subclasses know they haven't + # been resolved. + cls.__attrs_types_resolved__ = cls + + # Return the class so you can use it as a decorator too. + return cls diff --git a/src/poetry/core/_vendor/attr/_make.py b/src/poetry/core/_vendor/attr/_make.py new file mode 100644 index 0000000..4d1afe3 --- /dev/null +++ b/src/poetry/core/_vendor/attr/_make.py @@ -0,0 +1,3006 @@ +# SPDX-License-Identifier: MIT + +import copy +import linecache +import sys +import types +import typing + +from operator import itemgetter + +# We need to import _compat itself in addition to the _compat members to avoid +# having the thread-local in the globals here. +from . import _compat, _config, setters +from ._compat import ( + HAS_F_STRINGS, + PY310, + PYPY, + _AnnotationExtractor, + ordered_dict, + set_closure_cell, +) +from .exceptions import ( + DefaultAlreadySetError, + FrozenInstanceError, + NotAnAttrsClassError, + UnannotatedAttributeError, +) + + +# This is used at least twice, so cache it here. 
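+# (Going through ``object.__setattr__`` directly also bypasses any
+# attrs-generated ``__setattr__``, e.g. on frozen classes.)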
+_obj_setattr = object.__setattr__ +_init_converter_pat = "__attr_converter_%s" +_init_factory_pat = "__attr_factory_{}" +_tuple_property_pat = ( + " {attr_name} = _attrs_property(_attrs_itemgetter({index}))" +) +_classvar_prefixes = ( + "typing.ClassVar", + "t.ClassVar", + "ClassVar", + "typing_extensions.ClassVar", +) +# we don't use a double-underscore prefix because that triggers +# name mangling when trying to create a slot for the field +# (when slots=True) +_hash_cache_field = "_attrs_cached_hash" + +_empty_metadata_singleton = types.MappingProxyType({}) + +# Unique object for unequivocal getattr() defaults. +_sentinel = object() + +_ng_default_on_setattr = setters.pipe(setters.convert, setters.validate) + + +class _Nothing: + """ + Sentinel class to indicate the lack of a value when ``None`` is ambiguous. + + ``_Nothing`` is a singleton. There is only ever one of it. + + .. versionchanged:: 21.1.0 ``bool(NOTHING)`` is now False. + """ + + _singleton = None + + def __new__(cls): + if _Nothing._singleton is None: + _Nothing._singleton = super().__new__(cls) + return _Nothing._singleton + + def __repr__(self): + return "NOTHING" + + def __bool__(self): + return False + + +NOTHING = _Nothing() +""" +Sentinel to indicate the lack of a value when ``None`` is ambiguous. +""" + + +class _CacheHashWrapper(int): + """ + An integer subclass that pickles / copies as None + + This is used for non-slots classes with ``cache_hash=True``, to avoid + serializing a potentially (even likely) invalid hash value. Since ``None`` + is the default value for uncalculated hashes, whenever this is copied, + the copy's value for the hash should automatically reset. + + See GH #613 for more details. + """ + + def __reduce__(self, _none_constructor=type(None), _args=()): + return _none_constructor, _args + + +def attrib( + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=None, + init=True, + metadata=None, + type=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, +): + """ + Create a new attribute on a class. + + .. warning:: + + Does *not* do anything unless the class is also decorated with + `attr.s`! + + :param default: A value that is used if an ``attrs``-generated ``__init__`` + is used and no value is passed while instantiating or the attribute is + excluded using ``init=False``. + + If the value is an instance of `attrs.Factory`, its callable will be + used to construct a new value (useful for mutable data types like lists + or dicts). + + If a default is not set (or set manually to `attrs.NOTHING`), a value + *must* be supplied when instantiating; otherwise a `TypeError` + will be raised. + + The default can also be set using decorator notation as shown below. + + :type default: Any value + + :param callable factory: Syntactic sugar for + ``default=attr.Factory(factory)``. + + :param validator: `callable` that is called by ``attrs``-generated + ``__init__`` methods after the instance has been initialized. They + receive the initialized instance, the :func:`~attrs.Attribute`, and the + passed value. + + The return value is *not* inspected so the validator has to throw an + exception itself. + + If a `list` is passed, its items are treated as validators and must + all pass. + + Validators can be globally disabled and re-enabled using + `get_run_validators`. + + The validator can also be set using decorator notation as shown below. + + :type validator: `callable` or a `list` of `callable`\\ s. 
+ + :param repr: Include this attribute in the generated ``__repr__`` + method. If ``True``, include the attribute; if ``False``, omit it. By + default, the built-in ``repr()`` function is used. To override how the + attribute value is formatted, pass a ``callable`` that takes a single + value and returns a string. Note that the resulting string is used + as-is, i.e. it will be used directly *instead* of calling ``repr()`` + (the default). + :type repr: a `bool` or a `callable` to use a custom function. + + :param eq: If ``True`` (default), include this attribute in the + generated ``__eq__`` and ``__ne__`` methods that check two instances + for equality. To override how the attribute value is compared, + pass a ``callable`` that takes a single value and returns the value + to be compared. + :type eq: a `bool` or a `callable`. + + :param order: If ``True`` (default), include this attributes in the + generated ``__lt__``, ``__le__``, ``__gt__`` and ``__ge__`` methods. + To override how the attribute value is ordered, + pass a ``callable`` that takes a single value and returns the value + to be ordered. + :type order: a `bool` or a `callable`. + + :param cmp: Setting *cmp* is equivalent to setting *eq* and *order* to the + same value. Must not be mixed with *eq* or *order*. + :type cmp: a `bool` or a `callable`. + + :param Optional[bool] hash: Include this attribute in the generated + ``__hash__`` method. If ``None`` (default), mirror *eq*'s value. This + is the correct behavior according the Python spec. Setting this value + to anything else than ``None`` is *discouraged*. + :param bool init: Include this attribute in the generated ``__init__`` + method. It is possible to set this to ``False`` and set a default + value. In that case this attributed is unconditionally initialized + with the specified default value or factory. + :param callable converter: `callable` that is called by + ``attrs``-generated ``__init__`` methods to convert attribute's value + to the desired format. It is given the passed-in value, and the + returned value will be used as the new value of the attribute. The + value is converted before being passed to the validator, if any. + :param metadata: An arbitrary mapping, to be used by third-party + components. See `extending_metadata`. + :param type: The type of the attribute. In Python 3.6 or greater, the + preferred method to specify the type is using a variable annotation + (see :pep:`526`). + This argument is provided for backward compatibility. + Regardless of the approach used, the type will be stored on + ``Attribute.type``. + + Please note that ``attrs`` doesn't do anything with this metadata by + itself. You can use it as part of your own code or for + `static type checking `. + :param kw_only: Make this attribute keyword-only (Python 3+) + in the generated ``__init__`` (if ``init`` is ``False``, this + parameter is ignored). + :param on_setattr: Allows to overwrite the *on_setattr* setting from + `attr.s`. If left `None`, the *on_setattr* value from `attr.s` is used. + Set to `attrs.setters.NO_OP` to run **no** `setattr` hooks for this + attribute -- regardless of the setting in `attr.s`. + :type on_setattr: `callable`, or a list of callables, or `None`, or + `attrs.setters.NO_OP` + + .. versionadded:: 15.2.0 *convert* + .. versionadded:: 16.3.0 *metadata* + .. versionchanged:: 17.1.0 *validator* can be a ``list`` now. + .. versionchanged:: 17.1.0 + *hash* is ``None`` and therefore mirrors *eq* by default. + .. versionadded:: 17.3.0 *type* + .. 
deprecated:: 17.4.0 *convert* + .. versionadded:: 17.4.0 *converter* as a replacement for the deprecated + *convert* to achieve consistency with other noun-based arguments. + .. versionadded:: 18.1.0 + ``factory=f`` is syntactic sugar for ``default=attr.Factory(f)``. + .. versionadded:: 18.2.0 *kw_only* + .. versionchanged:: 19.2.0 *convert* keyword argument removed. + .. versionchanged:: 19.2.0 *repr* also accepts a custom callable. + .. deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. + .. versionadded:: 19.2.0 *eq* and *order* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.3.0 *kw_only* backported to Python 2 + .. versionchanged:: 21.1.0 + *eq*, *order*, and *cmp* also accept a custom callable + .. versionchanged:: 21.1.0 *cmp* undeprecated + """ + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq, order, True + ) + + if hash is not None and hash is not True and hash is not False: + raise TypeError( + "Invalid value for hash. Must be True, False, or None." + ) + + if factory is not None: + if default is not NOTHING: + raise ValueError( + "The `default` and `factory` arguments are mutually " + "exclusive." + ) + if not callable(factory): + raise ValueError("The `factory` argument must be a callable.") + default = Factory(factory) + + if metadata is None: + metadata = {} + + # Apply syntactic sugar by auto-wrapping. + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + if validator and isinstance(validator, (list, tuple)): + validator = and_(*validator) + + if converter and isinstance(converter, (list, tuple)): + converter = pipe(*converter) + + return _CountingAttr( + default=default, + validator=validator, + repr=repr, + cmp=None, + hash=hash, + init=init, + converter=converter, + metadata=metadata, + type=type, + kw_only=kw_only, + eq=eq, + eq_key=eq_key, + order=order, + order_key=order_key, + on_setattr=on_setattr, + ) + + +def _compile_and_eval(script, globs, locs=None, filename=""): + """ + "Exec" the script with the given global (globs) and local (locs) variables. + """ + bytecode = compile(script, filename, "exec") + eval(bytecode, globs, locs) + + +def _make_method(name, script, filename, globs): + """ + Create the method with the script given and return the method object. + """ + locs = {} + + # In order of debuggers like PDB being able to step through the code, + # we add a fake linecache entry. + count = 1 + base_filename = filename + while True: + linecache_tuple = ( + len(script), + None, + script.splitlines(True), + filename, + ) + old_val = linecache.cache.setdefault(filename, linecache_tuple) + if old_val == linecache_tuple: + break + else: + filename = "{}-{}>".format(base_filename[:-1], count) + count += 1 + + _compile_and_eval(script, globs, locs, filename) + + return locs[name] + + +def _make_attr_tuple_class(cls_name, attr_names): + """ + Create a tuple subclass to hold `Attribute`s for an `attrs` class. + + The subclass is a bare tuple with properties for names. 
+ + class MyClassAttributes(tuple): + __slots__ = () + x = property(itemgetter(0)) + """ + attr_class_name = "{}Attributes".format(cls_name) + attr_class_template = [ + "class {}(tuple):".format(attr_class_name), + " __slots__ = ()", + ] + if attr_names: + for i, attr_name in enumerate(attr_names): + attr_class_template.append( + _tuple_property_pat.format(index=i, attr_name=attr_name) + ) + else: + attr_class_template.append(" pass") + globs = {"_attrs_itemgetter": itemgetter, "_attrs_property": property} + _compile_and_eval("\n".join(attr_class_template), globs) + return globs[attr_class_name] + + +# Tuple class for extracted attributes from a class definition. +# `base_attrs` is a subset of `attrs`. +_Attributes = _make_attr_tuple_class( + "_Attributes", + [ + # all attributes to build dunder methods for + "attrs", + # attributes that have been inherited + "base_attrs", + # map inherited attributes to their originating classes + "base_attrs_map", + ], +) + + +def _is_class_var(annot): + """ + Check whether *annot* is a typing.ClassVar. + + The string comparison hack is used to avoid evaluating all string + annotations which would put attrs-based classes at a performance + disadvantage compared to plain old classes. + """ + annot = str(annot) + + # Annotation can be quoted. + if annot.startswith(("'", '"')) and annot.endswith(("'", '"')): + annot = annot[1:-1] + + return annot.startswith(_classvar_prefixes) + + +def _has_own_attribute(cls, attrib_name): + """ + Check whether *cls* defines *attrib_name* (and doesn't just inherit it). + + Requires Python 3. + """ + attr = getattr(cls, attrib_name, _sentinel) + if attr is _sentinel: + return False + + for base_cls in cls.__mro__[1:]: + a = getattr(base_cls, attrib_name, None) + if attr is a: + return False + + return True + + +def _get_annotations(cls): + """ + Get annotations for *cls*. + """ + if _has_own_attribute(cls, "__annotations__"): + return cls.__annotations__ + + return {} + + +def _counter_getter(e): + """ + Key function for sorting to avoid re-creating a lambda for every class. + """ + return e[1].counter + + +def _collect_base_attrs(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. + for base_cls in reversed(cls.__mro__[1:-1]): + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.inherited or a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + # For each name, only keep the freshest definition i.e. the furthest at the + # back. base_attr_map is fine because it gets overwritten with every new + # instance. + filtered = [] + seen = set() + for a in reversed(base_attrs): + if a.name in seen: + continue + filtered.insert(0, a) + seen.add(a.name) + + return filtered, base_attr_map + + +def _collect_base_attrs_broken(cls, taken_attr_names): + """ + Collect attr.ibs from base classes of *cls*, except *taken_attr_names*. + + N.B. *taken_attr_names* will be mutated. + + Adhere to the old incorrect behavior. + + Notably it collects from the front and considers inherited attributes which + leads to the buggy behavior reported in #428. + """ + base_attrs = [] + base_attr_map = {} # A dictionary of base attrs to their classes. + + # Traverse the MRO and collect attributes. 
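+ # (Unlike ``_collect_base_attrs`` above, the MRO is walked front to back
+ # and the first definition encountered wins -- the pre-#428 behavior that
+ # *collect_by_mro=True* avoids.)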
+ for base_cls in cls.__mro__[1:-1]: + for a in getattr(base_cls, "__attrs_attrs__", []): + if a.name in taken_attr_names: + continue + + a = a.evolve(inherited=True) + taken_attr_names.add(a.name) + base_attrs.append(a) + base_attr_map[a.name] = base_cls + + return base_attrs, base_attr_map + + +def _transform_attrs( + cls, these, auto_attribs, kw_only, collect_by_mro, field_transformer +): + """ + Transform all `_CountingAttr`s on a class into `Attribute`s. + + If *these* is passed, use that and don't look for them on the class. + + *collect_by_mro* is True, collect them in the correct MRO order, otherwise + use the old -- incorrect -- order. See #428. + + Return an `_Attributes`. + """ + cd = cls.__dict__ + anns = _get_annotations(cls) + + if these is not None: + ca_list = [(name, ca) for name, ca in these.items()] + + if not isinstance(these, ordered_dict): + ca_list.sort(key=_counter_getter) + elif auto_attribs is True: + ca_names = { + name + for name, attr in cd.items() + if isinstance(attr, _CountingAttr) + } + ca_list = [] + annot_names = set() + for attr_name, type in anns.items(): + if _is_class_var(type): + continue + annot_names.add(attr_name) + a = cd.get(attr_name, NOTHING) + + if not isinstance(a, _CountingAttr): + if a is NOTHING: + a = attrib() + else: + a = attrib(default=a) + ca_list.append((attr_name, a)) + + unannotated = ca_names - annot_names + if len(unannotated) > 0: + raise UnannotatedAttributeError( + "The following `attr.ib`s lack a type annotation: " + + ", ".join( + sorted(unannotated, key=lambda n: cd.get(n).counter) + ) + + "." + ) + else: + ca_list = sorted( + ( + (name, attr) + for name, attr in cd.items() + if isinstance(attr, _CountingAttr) + ), + key=lambda e: e[1].counter, + ) + + own_attrs = [ + Attribute.from_counting_attr( + name=attr_name, ca=ca, type=anns.get(attr_name) + ) + for attr_name, ca in ca_list + ] + + if collect_by_mro: + base_attrs, base_attr_map = _collect_base_attrs( + cls, {a.name for a in own_attrs} + ) + else: + base_attrs, base_attr_map = _collect_base_attrs_broken( + cls, {a.name for a in own_attrs} + ) + + if kw_only: + own_attrs = [a.evolve(kw_only=True) for a in own_attrs] + base_attrs = [a.evolve(kw_only=True) for a in base_attrs] + + attrs = base_attrs + own_attrs + + # Mandatory vs non-mandatory attr order only matters when they are part of + # the __init__ signature and when they aren't kw_only (which are moved to + # the end and can be mandatory or non-mandatory in any order, as they will + # be specified as keyword args anyway). Check the order of those attrs: + had_default = False + for a in (a for a in attrs if a.init is not False and a.kw_only is False): + if had_default is True and a.default is NOTHING: + raise ValueError( + "No mandatory attributes allowed after an attribute with a " + "default value or factory. Attribute in question: %r" % (a,) + ) + + if had_default is False and a.default is not NOTHING: + had_default = True + + if field_transformer is not None: + attrs = field_transformer(cls, attrs) + + # Create AttrsClass *after* applying the field_transformer since it may + # add or remove attributes! + attr_names = [a.name for a in attrs] + AttrsClass = _make_attr_tuple_class(cls.__name__, attr_names) + + return _Attributes((AttrsClass(attrs), base_attrs, base_attr_map)) + + +if PYPY: + + def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. 
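+
+ The PyPy variant still allows ``__cause__`` and ``__context__`` to be set
+ on exceptions, since PyPy (unlike CPython) performs exception chaining
+ through regular attribute assignment.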
+ """ + if isinstance(self, BaseException) and name in ( + "__cause__", + "__context__", + ): + BaseException.__setattr__(self, name, value) + return + + raise FrozenInstanceError() + +else: + + def _frozen_setattrs(self, name, value): + """ + Attached to frozen classes as __setattr__. + """ + raise FrozenInstanceError() + + +def _frozen_delattrs(self, name): + """ + Attached to frozen classes as __delattr__. + """ + raise FrozenInstanceError() + + +class _ClassBuilder: + """ + Iteratively build *one* class. + """ + + __slots__ = ( + "_attr_names", + "_attrs", + "_base_attr_map", + "_base_names", + "_cache_hash", + "_cls", + "_cls_dict", + "_delete_attribs", + "_frozen", + "_has_pre_init", + "_has_post_init", + "_is_exc", + "_on_setattr", + "_slots", + "_weakref_slot", + "_wrote_own_setattr", + "_has_custom_setattr", + ) + + def __init__( + self, + cls, + these, + slots, + frozen, + weakref_slot, + getstate_setstate, + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_custom_setattr, + field_transformer, + ): + attrs, base_attrs, base_map = _transform_attrs( + cls, + these, + auto_attribs, + kw_only, + collect_by_mro, + field_transformer, + ) + + self._cls = cls + self._cls_dict = dict(cls.__dict__) if slots else {} + self._attrs = attrs + self._base_names = {a.name for a in base_attrs} + self._base_attr_map = base_map + self._attr_names = tuple(a.name for a in attrs) + self._slots = slots + self._frozen = frozen + self._weakref_slot = weakref_slot + self._cache_hash = cache_hash + self._has_pre_init = bool(getattr(cls, "__attrs_pre_init__", False)) + self._has_post_init = bool(getattr(cls, "__attrs_post_init__", False)) + self._delete_attribs = not bool(these) + self._is_exc = is_exc + self._on_setattr = on_setattr + + self._has_custom_setattr = has_custom_setattr + self._wrote_own_setattr = False + + self._cls_dict["__attrs_attrs__"] = self._attrs + + if frozen: + self._cls_dict["__setattr__"] = _frozen_setattrs + self._cls_dict["__delattr__"] = _frozen_delattrs + + self._wrote_own_setattr = True + elif on_setattr in ( + _ng_default_on_setattr, + setters.validate, + setters.convert, + ): + has_validator = has_converter = False + for a in attrs: + if a.validator is not None: + has_validator = True + if a.converter is not None: + has_converter = True + + if has_validator and has_converter: + break + if ( + ( + on_setattr == _ng_default_on_setattr + and not (has_validator or has_converter) + ) + or (on_setattr == setters.validate and not has_validator) + or (on_setattr == setters.convert and not has_converter) + ): + # If class-level on_setattr is set to convert + validate, but + # there's no field to convert or validate, pretend like there's + # no on_setattr. + self._on_setattr = None + + if getstate_setstate: + ( + self._cls_dict["__getstate__"], + self._cls_dict["__setstate__"], + ) = self._make_getstate_setstate() + + def __repr__(self): + return "<_ClassBuilder(cls={cls})>".format(cls=self._cls.__name__) + + def build_class(self): + """ + Finalize class based on the accumulated configuration. + + Builder cannot be used after calling this method. + """ + if self._slots is True: + return self._create_slots_class() + else: + return self._patch_original_class() + + def _patch_original_class(self): + """ + Apply accumulated methods and return the class. + """ + cls = self._cls + base_names = self._base_names + + # Clean class of attribute definitions (`attr.ib()`s). 
+ if self._delete_attribs: + for name in self._attr_names: + if ( + name not in base_names + and getattr(cls, name, _sentinel) is not _sentinel + ): + try: + delattr(cls, name) + except AttributeError: + # This can happen if a base class defines a class + # variable and we want to set an attribute with the + # same name by using only a type annotation. + pass + + # Attach our dunder methods. + for name, value in self._cls_dict.items(): + setattr(cls, name, value) + + # If we've inherited an attrs __setattr__ and don't write our own, + # reset it to object's. + if not self._wrote_own_setattr and getattr( + cls, "__attrs_own_setattr__", False + ): + cls.__attrs_own_setattr__ = False + + if not self._has_custom_setattr: + cls.__setattr__ = _obj_setattr + + return cls + + def _create_slots_class(self): + """ + Build and return a new class with a `__slots__` attribute. + """ + cd = { + k: v + for k, v in self._cls_dict.items() + if k not in tuple(self._attr_names) + ("__dict__", "__weakref__") + } + + # If our class doesn't have its own implementation of __setattr__ + # (either from the user or by us), check the bases, if one of them has + # an attrs-made __setattr__, that needs to be reset. We don't walk the + # MRO because we only care about our immediate base classes. + # XXX: This can be confused by subclassing a slotted attrs class with + # XXX: a non-attrs class and subclass the resulting class with an attrs + # XXX: class. See `test_slotted_confused` for details. For now that's + # XXX: OK with us. + if not self._wrote_own_setattr: + cd["__attrs_own_setattr__"] = False + + if not self._has_custom_setattr: + for base_cls in self._cls.__bases__: + if base_cls.__dict__.get("__attrs_own_setattr__", False): + cd["__setattr__"] = _obj_setattr + break + + # Traverse the MRO to collect existing slots + # and check for an existing __weakref__. + existing_slots = dict() + weakref_inherited = False + for base_cls in self._cls.__mro__[1:-1]: + if base_cls.__dict__.get("__weakref__", None) is not None: + weakref_inherited = True + existing_slots.update( + { + name: getattr(base_cls, name) + for name in getattr(base_cls, "__slots__", []) + } + ) + + base_names = set(self._base_names) + + names = self._attr_names + if ( + self._weakref_slot + and "__weakref__" not in getattr(self._cls, "__slots__", ()) + and "__weakref__" not in names + and not weakref_inherited + ): + names += ("__weakref__",) + + # We only add the names of attributes that aren't inherited. + # Setting __slots__ to inherited attributes wastes memory. + slot_names = [name for name in names if name not in base_names] + # There are slots for attributes from current class + # that are defined in parent classes. + # As their descriptors may be overridden by a child class, + # we collect them here and update the class dict + reused_slots = { + slot: slot_descriptor + for slot, slot_descriptor in existing_slots.items() + if slot in slot_names + } + slot_names = [name for name in slot_names if name not in reused_slots] + cd.update(reused_slots) + if self._cache_hash: + slot_names.append(_hash_cache_field) + cd["__slots__"] = tuple(slot_names) + + cd["__qualname__"] = self._cls.__qualname__ + + # Create new class based on old class and our methods. + cls = type(self._cls)(self._cls.__name__, self._cls.__bases__, cd) + + # The following is a fix for + # . On Python 3, + # if a method mentions `__class__` or uses the no-arg super(), the + # compiler will bake a reference to the class in the method itself + # as `method.__closure__`. 
Since we replace the class with a + # clone, we rewrite these references so it keeps working. + for item in cls.__dict__.values(): + if isinstance(item, (classmethod, staticmethod)): + # Class- and staticmethods hide their functions inside. + # These might need to be rewritten as well. + closure_cells = getattr(item.__func__, "__closure__", None) + elif isinstance(item, property): + # Workaround for property `super()` shortcut (PY3-only). + # There is no universal way for other descriptors. + closure_cells = getattr(item.fget, "__closure__", None) + else: + closure_cells = getattr(item, "__closure__", None) + + if not closure_cells: # Catch None or the empty list. + continue + for cell in closure_cells: + try: + match = cell.cell_contents is self._cls + except ValueError: # ValueError: Cell is empty + pass + else: + if match: + set_closure_cell(cell, cls) + + return cls + + def add_repr(self, ns): + self._cls_dict["__repr__"] = self._add_method_dunders( + _make_repr(self._attrs, ns, self._cls) + ) + return self + + def add_str(self): + repr = self._cls_dict.get("__repr__") + if repr is None: + raise ValueError( + "__str__ can only be generated if a __repr__ exists." + ) + + def __str__(self): + return self.__repr__() + + self._cls_dict["__str__"] = self._add_method_dunders(__str__) + return self + + def _make_getstate_setstate(self): + """ + Create custom __setstate__ and __getstate__ methods. + """ + # __weakref__ is not writable. + state_attr_names = tuple( + an for an in self._attr_names if an != "__weakref__" + ) + + def slots_getstate(self): + """ + Automatically created by attrs. + """ + return tuple(getattr(self, name) for name in state_attr_names) + + hash_caching_enabled = self._cache_hash + + def slots_setstate(self, state): + """ + Automatically created by attrs. + """ + __bound_setattr = _obj_setattr.__get__(self, Attribute) + for name, value in zip(state_attr_names, state): + __bound_setattr(name, value) + + # The hash code cache is not included when the object is + # serialized, but it still needs to be initialized to None to + # indicate that the first call to __hash__ should be a cache + # miss. 
+ if hash_caching_enabled: + __bound_setattr(_hash_cache_field, None) + + return slots_getstate, slots_setstate + + def make_unhashable(self): + self._cls_dict["__hash__"] = None + return self + + def add_hash(self): + self._cls_dict["__hash__"] = self._add_method_dunders( + _make_hash( + self._cls, + self._attrs, + frozen=self._frozen, + cache_hash=self._cache_hash, + ) + ) + + return self + + def add_init(self): + self._cls_dict["__init__"] = self._add_method_dunders( + _make_init( + self._cls, + self._attrs, + self._has_pre_init, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=False, + ) + ) + + return self + + def add_match_args(self): + self._cls_dict["__match_args__"] = tuple( + field.name + for field in self._attrs + if field.init and not field.kw_only + ) + + def add_attrs_init(self): + self._cls_dict["__attrs_init__"] = self._add_method_dunders( + _make_init( + self._cls, + self._attrs, + self._has_pre_init, + self._has_post_init, + self._frozen, + self._slots, + self._cache_hash, + self._base_attr_map, + self._is_exc, + self._on_setattr, + attrs_init=True, + ) + ) + + return self + + def add_eq(self): + cd = self._cls_dict + + cd["__eq__"] = self._add_method_dunders( + _make_eq(self._cls, self._attrs) + ) + cd["__ne__"] = self._add_method_dunders(_make_ne()) + + return self + + def add_order(self): + cd = self._cls_dict + + cd["__lt__"], cd["__le__"], cd["__gt__"], cd["__ge__"] = ( + self._add_method_dunders(meth) + for meth in _make_order(self._cls, self._attrs) + ) + + return self + + def add_setattr(self): + if self._frozen: + return self + + sa_attrs = {} + for a in self._attrs: + on_setattr = a.on_setattr or self._on_setattr + if on_setattr and on_setattr is not setters.NO_OP: + sa_attrs[a.name] = a, on_setattr + + if not sa_attrs: + return self + + if self._has_custom_setattr: + # We need to write a __setattr__ but there already is one! + raise ValueError( + "Can't combine custom __setattr__ with on_setattr hooks." + ) + + # docstring comes from _add_method_dunders + def __setattr__(self, name, val): + try: + a, hook = sa_attrs[name] + except KeyError: + nval = val + else: + nval = hook(self, a, val) + + _obj_setattr(self, name, nval) + + self._cls_dict["__attrs_own_setattr__"] = True + self._cls_dict["__setattr__"] = self._add_method_dunders(__setattr__) + self._wrote_own_setattr = True + + return self + + def _add_method_dunders(self, method): + """ + Add __module__ and __qualname__ to a *method* if possible. + """ + try: + method.__module__ = self._cls.__module__ + except AttributeError: + pass + + try: + method.__qualname__ = ".".join( + (self._cls.__qualname__, method.__name__) + ) + except AttributeError: + pass + + try: + method.__doc__ = "Method generated by attrs for class %s." % ( + self._cls.__qualname__, + ) + except AttributeError: + pass + + return method + + +def _determine_attrs_eq_order(cmp, eq, order, default_eq): + """ + Validate the combination of *cmp*, *eq*, and *order*. Derive the effective + values of eq and order. If *eq* is None, set it to *default_eq*. + """ + if cmp is not None and any((eq is not None, order is not None)): + raise ValueError("Don't mix `cmp` with `eq' and `order`.") + + # cmp takes precedence due to bw-compatibility. + if cmp is not None: + return cmp, cmp + + # If left None, equality is set to the specified default and ordering + # mirrors equality. 
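+ # For example, ``eq=None, order=None`` with ``default_eq=True`` resolves
+ # to ``eq=True, order=True``, while ``eq=False`` with *order* left ``None``
+ # yields ``order=False`` as well.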
+ if eq is None: + eq = default_eq + + if order is None: + order = eq + + if eq is False and order is True: + raise ValueError("`order` can only be True if `eq` is True too.") + + return eq, order + + +def _determine_attrib_eq_order(cmp, eq, order, default_eq): + """ + Validate the combination of *cmp*, *eq*, and *order*. Derive the effective + values of eq and order. If *eq* is None, set it to *default_eq*. + """ + if cmp is not None and any((eq is not None, order is not None)): + raise ValueError("Don't mix `cmp` with `eq' and `order`.") + + def decide_callable_or_boolean(value): + """ + Decide whether a key function is used. + """ + if callable(value): + value, key = True, value + else: + key = None + return value, key + + # cmp takes precedence due to bw-compatibility. + if cmp is not None: + cmp, cmp_key = decide_callable_or_boolean(cmp) + return cmp, cmp_key, cmp, cmp_key + + # If left None, equality is set to the specified default and ordering + # mirrors equality. + if eq is None: + eq, eq_key = default_eq, None + else: + eq, eq_key = decide_callable_or_boolean(eq) + + if order is None: + order, order_key = eq, eq_key + else: + order, order_key = decide_callable_or_boolean(order) + + if eq is False and order is True: + raise ValueError("`order` can only be True if `eq` is True too.") + + return eq, eq_key, order, order_key + + +def _determine_whether_to_implement( + cls, flag, auto_detect, dunders, default=True +): + """ + Check whether we should implement a set of methods for *cls*. + + *flag* is the argument passed into @attr.s like 'init', *auto_detect* the + same as passed into @attr.s and *dunders* is a tuple of attribute names + whose presence signal that the user has implemented it themselves. + + Return *default* if no reason for either for or against is found. + """ + if flag is True or flag is False: + return flag + + if flag is None and auto_detect is False: + return default + + # Logically, flag is None and auto_detect is True here. + for dunder in dunders: + if _has_own_attribute(cls, dunder): + return False + + return default + + +def attrs( + maybe_cls=None, + these=None, + repr_ns=None, + repr=None, + cmp=None, + hash=None, + init=None, + slots=False, + frozen=False, + weakref_slot=True, + str=False, + auto_attribs=False, + kw_only=False, + cache_hash=False, + auto_exc=False, + eq=None, + order=None, + auto_detect=False, + collect_by_mro=False, + getstate_setstate=None, + on_setattr=None, + field_transformer=None, + match_args=True, +): + r""" + A class decorator that adds `dunder + `_\ -methods according to the + specified attributes using `attr.ib` or the *these* argument. + + :param these: A dictionary of name to `attr.ib` mappings. This is + useful to avoid the definition of your attributes within the class body + because you can't (e.g. if you want to add ``__repr__`` methods to + Django models) or don't want to. + + If *these* is not ``None``, ``attrs`` will *not* search the class body + for attributes and will *not* remove any attributes from it. + + If *these* is an ordered dict (`dict` on Python 3.6+, + `collections.OrderedDict` otherwise), the order is deduced from + the order of the attributes inside *these*. Otherwise the order + of the definition of the attributes is used. + + :type these: `dict` of `str` to `attr.ib` + + :param str repr_ns: When using nested classes, there's no way in Python 2 + to automatically detect that. Therefore it's possible to set the + namespace explicitly for a more meaningful ``repr`` output. 
+ :param bool auto_detect: Instead of setting the *init*, *repr*, *eq*, + *order*, and *hash* arguments explicitly, assume they are set to + ``True`` **unless any** of the involved methods for one of the + arguments is implemented in the *current* class (i.e. it is *not* + inherited from some base class). + + So for example by implementing ``__eq__`` on a class yourself, + ``attrs`` will deduce ``eq=False`` and will create *neither* + ``__eq__`` *nor* ``__ne__`` (but Python classes come with a sensible + ``__ne__`` by default, so it *should* be enough to only implement + ``__eq__`` in most cases). + + .. warning:: + + If you prevent ``attrs`` from creating the ordering methods for you + (``order=False``, e.g. by implementing ``__le__``), it becomes + *your* responsibility to make sure its ordering is sound. The best + way is to use the `functools.total_ordering` decorator. + + + Passing ``True`` or ``False`` to *init*, *repr*, *eq*, *order*, + *cmp*, or *hash* overrides whatever *auto_detect* would determine. + + *auto_detect* requires Python 3. Setting it ``True`` on Python 2 raises + an `attrs.exceptions.PythonTooOldError`. + + :param bool repr: Create a ``__repr__`` method with a human readable + representation of ``attrs`` attributes.. + :param bool str: Create a ``__str__`` method that is identical to + ``__repr__``. This is usually not necessary except for + `Exception`\ s. + :param Optional[bool] eq: If ``True`` or ``None`` (default), add ``__eq__`` + and ``__ne__`` methods that check two instances for equality. + + They compare the instances as if they were tuples of their ``attrs`` + attributes if and only if the types of both classes are *identical*! + :param Optional[bool] order: If ``True``, add ``__lt__``, ``__le__``, + ``__gt__``, and ``__ge__`` methods that behave like *eq* above and + allow instances to be ordered. If ``None`` (default) mirror value of + *eq*. + :param Optional[bool] cmp: Setting *cmp* is equivalent to setting *eq* + and *order* to the same value. Must not be mixed with *eq* or *order*. + :param Optional[bool] hash: If ``None`` (default), the ``__hash__`` method + is generated according how *eq* and *frozen* are set. + + 1. If *both* are True, ``attrs`` will generate a ``__hash__`` for you. + 2. If *eq* is True and *frozen* is False, ``__hash__`` will be set to + None, marking it unhashable (which it is). + 3. If *eq* is False, ``__hash__`` will be left untouched meaning the + ``__hash__`` method of the base class will be used (if base class is + ``object``, this means it will fall back to id-based hashing.). + + Although not recommended, you can decide for yourself and force + ``attrs`` to create one (e.g. if the class is immutable even though you + didn't freeze it programmatically) by passing ``True`` or not. Both of + these cases are rather special and should be used carefully. + + See our documentation on `hashing`, Python's documentation on + `object.__hash__`, and the `GitHub issue that led to the default \ + behavior `_ for more + details. + :param bool init: Create a ``__init__`` method that initializes the + ``attrs`` attributes. Leading underscores are stripped for the argument + name. If a ``__attrs_pre_init__`` method exists on the class, it will + be called before the class is initialized. If a ``__attrs_post_init__`` + method exists on the class, it will be called after the class is fully + initialized. + + If ``init`` is ``False``, an ``__attrs_init__`` method will be + injected instead. 
This allows you to define a custom ``__init__`` + method that can do pre-init work such as ``super().__init__()``, + and then call ``__attrs_init__()`` and ``__attrs_post_init__()``. + :param bool slots: Create a `slotted class ` that's more + memory-efficient. Slotted classes are generally superior to the default + dict classes, but have some gotchas you should know about, so we + encourage you to read the `glossary entry `. + :param bool frozen: Make instances immutable after initialization. If + someone attempts to modify a frozen instance, + `attr.exceptions.FrozenInstanceError` is raised. + + .. note:: + + 1. This is achieved by installing a custom ``__setattr__`` method + on your class, so you can't implement your own. + + 2. True immutability is impossible in Python. + + 3. This *does* have a minor a runtime performance `impact + ` when initializing new instances. In other words: + ``__init__`` is slightly slower with ``frozen=True``. + + 4. If a class is frozen, you cannot modify ``self`` in + ``__attrs_post_init__`` or a self-written ``__init__``. You can + circumvent that limitation by using + ``object.__setattr__(self, "attribute_name", value)``. + + 5. Subclasses of a frozen class are frozen too. + + :param bool weakref_slot: Make instances weak-referenceable. This has no + effect unless ``slots`` is also enabled. + :param bool auto_attribs: If ``True``, collect :pep:`526`-annotated + attributes (Python 3.6 and later only) from the class body. + + In this case, you **must** annotate every field. If ``attrs`` + encounters a field that is set to an `attr.ib` but lacks a type + annotation, an `attr.exceptions.UnannotatedAttributeError` is + raised. Use ``field_name: typing.Any = attr.ib(...)`` if you don't + want to set a type. + + If you assign a value to those attributes (e.g. ``x: int = 42``), that + value becomes the default value like if it were passed using + ``attr.ib(default=42)``. Passing an instance of `attrs.Factory` also + works as expected in most cases (see warning below). + + Attributes annotated as `typing.ClassVar`, and attributes that are + neither annotated nor set to an `attr.ib` are **ignored**. + + .. warning:: + For features that use the attribute name to create decorators (e.g. + `validators `), you still *must* assign `attr.ib` to + them. Otherwise Python will either not find the name or try to use + the default value to call e.g. ``validator`` on it. + + These errors can be quite confusing and probably the most common bug + report on our bug tracker. + + :param bool kw_only: Make all attributes keyword-only (Python 3+) + in the generated ``__init__`` (if ``init`` is ``False``, this + parameter is ignored). + :param bool cache_hash: Ensure that the object's hash code is computed + only once and stored on the object. If this is set to ``True``, + hashing must be either explicitly or implicitly enabled for this + class. If the hash code is cached, avoid any reassignments of + fields involved in hash code computation or mutations of the objects + those fields point to after object creation. If such changes occur, + the behavior of the object's hash code is undefined. + :param bool auto_exc: If the class subclasses `BaseException` + (which implicitly includes any subclass of any exception), the + following happens to behave like a well-behaved Python exceptions + class: + + - the values for *eq*, *order*, and *hash* are ignored and the + instances compare and hash by the instance's ids (N.B. 
``attrs`` will + *not* remove existing implementations of ``__hash__`` or the equality + methods. It just won't add own ones.), + - all attributes that are either passed into ``__init__`` or have a + default value are additionally available as a tuple in the ``args`` + attribute, + - the value of *str* is ignored leaving ``__str__`` to base classes. + :param bool collect_by_mro: Setting this to `True` fixes the way ``attrs`` + collects attributes from base classes. The default behavior is + incorrect in certain cases of multiple inheritance. It should be on by + default but is kept off for backward-compatibility. + + See issue `#428 `_ for + more details. + + :param Optional[bool] getstate_setstate: + .. note:: + This is usually only interesting for slotted classes and you should + probably just set *auto_detect* to `True`. + + If `True`, ``__getstate__`` and + ``__setstate__`` are generated and attached to the class. This is + necessary for slotted classes to be pickleable. If left `None`, it's + `True` by default for slotted classes and ``False`` for dict classes. + + If *auto_detect* is `True`, and *getstate_setstate* is left `None`, + and **either** ``__getstate__`` or ``__setstate__`` is detected directly + on the class (i.e. not inherited), it is set to `False` (this is usually + what you want). + + :param on_setattr: A callable that is run whenever the user attempts to set + an attribute (either by assignment like ``i.x = 42`` or by using + `setattr` like ``setattr(i, "x", 42)``). It receives the same arguments + as validators: the instance, the attribute that is being modified, and + the new value. + + If no exception is raised, the attribute is set to the return value of + the callable. + + If a list of callables is passed, they're automatically wrapped in an + `attrs.setters.pipe`. + :type on_setattr: `callable`, or a list of callables, or `None`, or + `attrs.setters.NO_OP` + + :param Optional[callable] field_transformer: + A function that is called with the original class object and all + fields right before ``attrs`` finalizes the class. You can use + this, e.g., to automatically add converters or validators to + fields based on their types. See `transform-fields` for more details. + + :param bool match_args: + If `True` (default), set ``__match_args__`` on the class to support + :pep:`634` (Structural Pattern Matching). It is a tuple of all + non-keyword-only ``__init__`` parameter names on Python 3.10 and later. + Ignored on older Python versions. + + .. versionadded:: 16.0.0 *slots* + .. versionadded:: 16.1.0 *frozen* + .. versionadded:: 16.3.0 *str* + .. versionadded:: 16.3.0 Support for ``__attrs_post_init__``. + .. versionchanged:: 17.1.0 + *hash* supports ``None`` as value which is also the default now. + .. versionadded:: 17.3.0 *auto_attribs* + .. versionchanged:: 18.1.0 + If *these* is passed, no attributes are deleted from the class body. + .. versionchanged:: 18.1.0 If *these* is ordered, the order is retained. + .. versionadded:: 18.2.0 *weakref_slot* + .. deprecated:: 18.2.0 + ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now raise a + `DeprecationWarning` if the classes compared are subclasses of + each other. ``__eq`` and ``__ne__`` never tried to compared subclasses + to each other. + .. versionchanged:: 19.2.0 + ``__lt__``, ``__le__``, ``__gt__``, and ``__ge__`` now do not consider + subclasses comparable anymore. + .. versionadded:: 18.2.0 *kw_only* + .. versionadded:: 18.2.0 *cache_hash* + .. versionadded:: 19.1.0 *auto_exc* + .. 
deprecated:: 19.2.0 *cmp* Removal on or after 2021-06-01. + .. versionadded:: 19.2.0 *eq* and *order* + .. versionadded:: 20.1.0 *auto_detect* + .. versionadded:: 20.1.0 *collect_by_mro* + .. versionadded:: 20.1.0 *getstate_setstate* + .. versionadded:: 20.1.0 *on_setattr* + .. versionadded:: 20.3.0 *field_transformer* + .. versionchanged:: 21.1.0 + ``init=False`` injects ``__attrs_init__`` + .. versionchanged:: 21.1.0 Support for ``__attrs_pre_init__`` + .. versionchanged:: 21.1.0 *cmp* undeprecated + .. versionadded:: 21.3.0 *match_args* + """ + eq_, order_ = _determine_attrs_eq_order(cmp, eq, order, None) + hash_ = hash # work around the lack of nonlocal + + if isinstance(on_setattr, (list, tuple)): + on_setattr = setters.pipe(*on_setattr) + + def wrap(cls): + is_frozen = frozen or _has_frozen_base_class(cls) + is_exc = auto_exc is True and issubclass(cls, BaseException) + has_own_setattr = auto_detect and _has_own_attribute( + cls, "__setattr__" + ) + + if has_own_setattr and is_frozen: + raise ValueError("Can't freeze a class with a custom __setattr__.") + + builder = _ClassBuilder( + cls, + these, + slots, + is_frozen, + weakref_slot, + _determine_whether_to_implement( + cls, + getstate_setstate, + auto_detect, + ("__getstate__", "__setstate__"), + default=slots, + ), + auto_attribs, + kw_only, + cache_hash, + is_exc, + collect_by_mro, + on_setattr, + has_own_setattr, + field_transformer, + ) + if _determine_whether_to_implement( + cls, repr, auto_detect, ("__repr__",) + ): + builder.add_repr(repr_ns) + if str is True: + builder.add_str() + + eq = _determine_whether_to_implement( + cls, eq_, auto_detect, ("__eq__", "__ne__") + ) + if not is_exc and eq is True: + builder.add_eq() + if not is_exc and _determine_whether_to_implement( + cls, order_, auto_detect, ("__lt__", "__le__", "__gt__", "__ge__") + ): + builder.add_order() + + builder.add_setattr() + + if ( + hash_ is None + and auto_detect is True + and _has_own_attribute(cls, "__hash__") + ): + hash = False + else: + hash = hash_ + if hash is not True and hash is not False and hash is not None: + # Can't use `hash in` because 1 == True for example. + raise TypeError( + "Invalid value for hash. Must be True, False, or None." + ) + elif hash is False or (hash is None and eq is False) or is_exc: + # Don't do anything. Should fall back to __object__'s __hash__ + # which is by id. + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " hashing must be either explicitly or implicitly " + "enabled." + ) + elif hash is True or ( + hash is None and eq is True and is_frozen is True + ): + # Build a __hash__ if told so, or if it's safe. + builder.add_hash() + else: + # Raise TypeError on attempts to hash. + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " hashing must be either explicitly or implicitly " + "enabled." + ) + builder.make_unhashable() + + if _determine_whether_to_implement( + cls, init, auto_detect, ("__init__",) + ): + builder.add_init() + else: + builder.add_attrs_init() + if cache_hash: + raise TypeError( + "Invalid value for cache_hash. To use hash caching," + " init must be True." + ) + + if ( + PY310 + and match_args + and not _has_own_attribute(cls, "__match_args__") + ): + builder.add_match_args() + + return builder.build_class() + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but ``None`` if used as `@attrs()`. 
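As a quick illustration of the two call styles the comment above distinguishes, here is a minimal usage sketch; it assumes the public ``attr`` package import rather than this vendored copy, and the class names are made up:

    import attr

    @attr.s  # bare use: maybe_cls is the class itself, so wrap(cls) runs immediately
    class Bare:
        x = attr.ib()

    @attr.s(frozen=True, slots=True)  # called with (): maybe_cls is None and wrap is returned
    class Configured:
        x = attr.ib()

    assert Bare(1).x == 1
    assert Configured(2).x == 2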
+ if maybe_cls is None: + return wrap + else: + return wrap(maybe_cls) + + +_attrs = attrs +""" +Internal alias so we can use it in functions that take an argument called +*attrs*. +""" + + +def _has_frozen_base_class(cls): + """ + Check whether *cls* has a frozen ancestor by looking at its + __setattr__. + """ + return cls.__setattr__ is _frozen_setattrs + + +def _generate_unique_filename(cls, func_name): + """ + Create a "filename" suitable for a function being generated. + """ + unique_filename = "".format( + func_name, + cls.__module__, + getattr(cls, "__qualname__", cls.__name__), + ) + return unique_filename + + +def _make_hash(cls, attrs, frozen, cache_hash): + attrs = tuple( + a for a in attrs if a.hash is True or (a.hash is None and a.eq is True) + ) + + tab = " " + + unique_filename = _generate_unique_filename(cls, "hash") + type_hash = hash(unique_filename) + # If eq is custom generated, we need to include the functions in globs + globs = {} + + hash_def = "def __hash__(self" + hash_func = "hash((" + closing_braces = "))" + if not cache_hash: + hash_def += "):" + else: + hash_def += ", *" + + hash_def += ( + ", _cache_wrapper=" + + "__import__('attr._make')._make._CacheHashWrapper):" + ) + hash_func = "_cache_wrapper(" + hash_func + closing_braces += ")" + + method_lines = [hash_def] + + def append_hash_computation_lines(prefix, indent): + """ + Generate the code for actually computing the hash code. + Below this will either be returned directly or used to compute + a value which is then cached, depending on the value of cache_hash + """ + + method_lines.extend( + [ + indent + prefix + hash_func, + indent + " %d," % (type_hash,), + ] + ) + + for a in attrs: + if a.eq_key: + cmp_name = "_%s_key" % (a.name,) + globs[cmp_name] = a.eq_key + method_lines.append( + indent + " %s(self.%s)," % (cmp_name, a.name) + ) + else: + method_lines.append(indent + " self.%s," % a.name) + + method_lines.append(indent + " " + closing_braces) + + if cache_hash: + method_lines.append(tab + "if self.%s is None:" % _hash_cache_field) + if frozen: + append_hash_computation_lines( + "object.__setattr__(self, '%s', " % _hash_cache_field, tab * 2 + ) + method_lines.append(tab * 2 + ")") # close __setattr__ + else: + append_hash_computation_lines( + "self.%s = " % _hash_cache_field, tab * 2 + ) + method_lines.append(tab + "return self.%s" % _hash_cache_field) + else: + append_hash_computation_lines("return ", tab) + + script = "\n".join(method_lines) + return _make_method("__hash__", script, unique_filename, globs) + + +def _add_hash(cls, attrs): + """ + Add a hash method to *cls*. + """ + cls.__hash__ = _make_hash(cls, attrs, frozen=False, cache_hash=False) + return cls + + +def _make_ne(): + """ + Create __ne__ method. + """ + + def __ne__(self, other): + """ + Check equality and either forward a NotImplemented or + return the result negated. + """ + result = self.__eq__(other) + if result is NotImplemented: + return NotImplemented + + return not result + + return __ne__ + + +def _make_eq(cls, attrs): + """ + Create __eq__ method for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.eq] + + unique_filename = _generate_unique_filename(cls, "eq") + lines = [ + "def __eq__(self, other):", + " if other.__class__ is not self.__class__:", + " return NotImplemented", + ] + + # We can't just do a big self.x = other.x and... clause due to + # irregularities like nan == nan is false but (nan,) == (nan,) is true. 
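The comment above refers to CPython's tuple comparison, which checks element identity before equality; a two-line sketch of the irregularity:

    nan = float("nan")
    assert (nan == nan) is False        # a NaN never compares equal to itself
    assert ((nan,) == (nan,)) is True   # tuple comparison treats identical elements as equal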
+ globs = {} + if attrs: + lines.append(" return (") + others = [" ) == ("] + for a in attrs: + if a.eq_key: + cmp_name = "_%s_key" % (a.name,) + # Add the key function to the global namespace + # of the evaluated function. + globs[cmp_name] = a.eq_key + lines.append( + " %s(self.%s)," + % ( + cmp_name, + a.name, + ) + ) + others.append( + " %s(other.%s)," + % ( + cmp_name, + a.name, + ) + ) + else: + lines.append(" self.%s," % (a.name,)) + others.append(" other.%s," % (a.name,)) + + lines += others + [" )"] + else: + lines.append(" return True") + + script = "\n".join(lines) + + return _make_method("__eq__", script, unique_filename, globs) + + +def _make_order(cls, attrs): + """ + Create ordering methods for *cls* with *attrs*. + """ + attrs = [a for a in attrs if a.order] + + def attrs_to_tuple(obj): + """ + Save us some typing. + """ + return tuple( + key(value) if key else value + for value, key in ( + (getattr(obj, a.name), a.order_key) for a in attrs + ) + ) + + def __lt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) < attrs_to_tuple(other) + + return NotImplemented + + def __le__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) <= attrs_to_tuple(other) + + return NotImplemented + + def __gt__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) > attrs_to_tuple(other) + + return NotImplemented + + def __ge__(self, other): + """ + Automatically created by attrs. + """ + if other.__class__ is self.__class__: + return attrs_to_tuple(self) >= attrs_to_tuple(other) + + return NotImplemented + + return __lt__, __le__, __gt__, __ge__ + + +def _add_eq(cls, attrs=None): + """ + Add equality methods to *cls* with *attrs*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__eq__ = _make_eq(cls, attrs) + cls.__ne__ = _make_ne() + + return cls + + +if HAS_F_STRINGS: + + def _make_repr(attrs, ns, cls): + unique_filename = _generate_unique_filename(cls, "repr") + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom + # callable. + attr_names_with_reprs = tuple( + (a.name, (repr if a.repr is True else a.repr), a.init) + for a in attrs + if a.repr is not False + ) + globs = { + name + "_repr": r + for name, r, _ in attr_names_with_reprs + if r != repr + } + globs["_compat"] = _compat + globs["AttributeError"] = AttributeError + globs["NOTHING"] = NOTHING + attribute_fragments = [] + for name, r, i in attr_names_with_reprs: + accessor = ( + "self." 
+ name + if i + else 'getattr(self, "' + name + '", NOTHING)' + ) + fragment = ( + "%s={%s!r}" % (name, accessor) + if r == repr + else "%s={%s_repr(%s)}" % (name, name, accessor) + ) + attribute_fragments.append(fragment) + repr_fragment = ", ".join(attribute_fragments) + + if ns is None: + cls_name_fragment = ( + '{self.__class__.__qualname__.rsplit(">.", 1)[-1]}' + ) + else: + cls_name_fragment = ns + ".{self.__class__.__name__}" + + lines = [ + "def __repr__(self):", + " try:", + " already_repring = _compat.repr_context.already_repring", + " except AttributeError:", + " already_repring = {id(self),}", + " _compat.repr_context.already_repring = already_repring", + " else:", + " if id(self) in already_repring:", + " return '...'", + " else:", + " already_repring.add(id(self))", + " try:", + " return f'%s(%s)'" % (cls_name_fragment, repr_fragment), + " finally:", + " already_repring.remove(id(self))", + ] + + return _make_method( + "__repr__", "\n".join(lines), unique_filename, globs=globs + ) + +else: + + def _make_repr(attrs, ns, _): + """ + Make a repr method that includes relevant *attrs*, adding *ns* to the + full name. + """ + + # Figure out which attributes to include, and which function to use to + # format them. The a.repr value can be either bool or a custom + # callable. + attr_names_with_reprs = tuple( + (a.name, repr if a.repr is True else a.repr) + for a in attrs + if a.repr is not False + ) + + def __repr__(self): + """ + Automatically created by attrs. + """ + try: + already_repring = _compat.repr_context.already_repring + except AttributeError: + already_repring = set() + _compat.repr_context.already_repring = already_repring + + if id(self) in already_repring: + return "..." + real_cls = self.__class__ + if ns is None: + class_name = real_cls.__qualname__.rsplit(">.", 1)[-1] + else: + class_name = ns + "." + real_cls.__name__ + + # Since 'self' remains on the stack (i.e.: strongly referenced) + # for the duration of this call, it's safe to depend on id(...) + # stability, and not need to track the instance and therefore + # worry about properties like weakref- or hash-ability. + already_repring.add(id(self)) + try: + result = [class_name, "("] + first = True + for name, attr_repr in attr_names_with_reprs: + if first: + first = False + else: + result.append(", ") + result.extend( + (name, "=", attr_repr(getattr(self, name, NOTHING))) + ) + return "".join(result) + ")" + finally: + already_repring.remove(id(self)) + + return __repr__ + + +def _add_repr(cls, ns=None, attrs=None): + """ + Add a repr method to *cls*. + """ + if attrs is None: + attrs = cls.__attrs_attrs__ + + cls.__repr__ = _make_repr(attrs, ns, cls) + return cls + + +def fields(cls): + """ + Return the tuple of ``attrs`` attributes for a class. + + The tuple also allows accessing the fields by their names (see below for + examples). + + :param type cls: Class to introspect. + + :raise TypeError: If *cls* is not a class. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + :rtype: tuple (with name accessors) of `attrs.Attribute` + + .. versionchanged:: 16.2.0 Returned tuple allows accessing the fields + by name. 
+ """ + if not isinstance(cls, type): + raise TypeError("Passed object must be a class.") + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: + raise NotAnAttrsClassError( + "{cls!r} is not an attrs-decorated class.".format(cls=cls) + ) + return attrs + + +def fields_dict(cls): + """ + Return an ordered dictionary of ``attrs`` attributes for a class, whose + keys are the attribute names. + + :param type cls: Class to introspect. + + :raise TypeError: If *cls* is not a class. + :raise attr.exceptions.NotAnAttrsClassError: If *cls* is not an ``attrs`` + class. + + :rtype: an ordered dict where keys are attribute names and values are + `attrs.Attribute`\\ s. This will be a `dict` if it's + naturally ordered like on Python 3.6+ or an + :class:`~collections.OrderedDict` otherwise. + + .. versionadded:: 18.1.0 + """ + if not isinstance(cls, type): + raise TypeError("Passed object must be a class.") + attrs = getattr(cls, "__attrs_attrs__", None) + if attrs is None: + raise NotAnAttrsClassError( + "{cls!r} is not an attrs-decorated class.".format(cls=cls) + ) + return ordered_dict((a.name, a) for a in attrs) + + +def validate(inst): + """ + Validate all attributes on *inst* that have a validator. + + Leaves all exceptions through. + + :param inst: Instance of a class with ``attrs`` attributes. + """ + if _config._run_validators is False: + return + + for a in fields(inst.__class__): + v = a.validator + if v is not None: + v(inst, a, getattr(inst, a.name)) + + +def _is_slot_cls(cls): + return "__slots__" in cls.__dict__ + + +def _is_slot_attr(a_name, base_attr_map): + """ + Check if the attribute name comes from a slot class. + """ + return a_name in base_attr_map and _is_slot_cls(base_attr_map[a_name]) + + +def _make_init( + cls, + attrs, + pre_init, + post_init, + frozen, + slots, + cache_hash, + base_attr_map, + is_exc, + cls_on_setattr, + attrs_init, +): + has_cls_on_setattr = ( + cls_on_setattr is not None and cls_on_setattr is not setters.NO_OP + ) + + if frozen and has_cls_on_setattr: + raise ValueError("Frozen classes can't use on_setattr.") + + needs_cached_setattr = cache_hash or frozen + filtered_attrs = [] + attr_dict = {} + for a in attrs: + if not a.init and a.default is NOTHING: + continue + + filtered_attrs.append(a) + attr_dict[a.name] = a + + if a.on_setattr is not None: + if frozen is True: + raise ValueError("Frozen classes can't use on_setattr.") + + needs_cached_setattr = True + elif has_cls_on_setattr and a.on_setattr is not setters.NO_OP: + needs_cached_setattr = True + + unique_filename = _generate_unique_filename(cls, "init") + + script, globs, annotations = _attrs_to_init_script( + filtered_attrs, + frozen, + slots, + pre_init, + post_init, + cache_hash, + base_attr_map, + is_exc, + has_cls_on_setattr, + attrs_init, + ) + if cls.__module__ in sys.modules: + # This makes typing.get_type_hints(CLS.__init__) resolve string types. + globs.update(sys.modules[cls.__module__].__dict__) + + globs.update({"NOTHING": NOTHING, "attr_dict": attr_dict}) + + if needs_cached_setattr: + # Save the lookup overhead in __init__ if we need to circumvent + # setattr hooks. + globs["_setattr"] = _obj_setattr + + init = _make_method( + "__attrs_init__" if attrs_init else "__init__", + script, + unique_filename, + globs, + ) + init.__annotations__ = annotations + + return init + + +def _setattr(attr_name, value_var, has_on_setattr): + """ + Use the cached object.setattr to set *attr_name* to *value_var*. 
+ """ + return "_setattr(self, '%s', %s)" % (attr_name, value_var) + + +def _setattr_with_converter(attr_name, value_var, has_on_setattr): + """ + Use the cached object.setattr to set *attr_name* to *value_var*, but run + its converter first. + """ + return "_setattr(self, '%s', %s(%s))" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + +def _assign(attr_name, value, has_on_setattr): + """ + Unless *attr_name* has an on_setattr hook, use normal assignment. Otherwise + relegate to _setattr. + """ + if has_on_setattr: + return _setattr(attr_name, value, True) + + return "self.%s = %s" % (attr_name, value) + + +def _assign_with_converter(attr_name, value_var, has_on_setattr): + """ + Unless *attr_name* has an on_setattr hook, use normal assignment after + conversion. Otherwise relegate to _setattr_with_converter. + """ + if has_on_setattr: + return _setattr_with_converter(attr_name, value_var, True) + + return "self.%s = %s(%s)" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + +def _attrs_to_init_script( + attrs, + frozen, + slots, + pre_init, + post_init, + cache_hash, + base_attr_map, + is_exc, + has_cls_on_setattr, + attrs_init, +): + """ + Return a script of an initializer for *attrs* and a dict of globals. + + The globals are expected by the generated script. + + If *frozen* is True, we cannot set the attributes directly so we use + a cached ``object.__setattr__``. + """ + lines = [] + if pre_init: + lines.append("self.__attrs_pre_init__()") + + if frozen is True: + if slots is True: + fmt_setter = _setattr + fmt_setter_with_converter = _setattr_with_converter + else: + # Dict frozen classes assign directly to __dict__. + # But only if the attribute doesn't come from an ancestor slot + # class. + # Note _inst_dict will be used again below if cache_hash is True + lines.append("_inst_dict = self.__dict__") + + def fmt_setter(attr_name, value_var, has_on_setattr): + if _is_slot_attr(attr_name, base_attr_map): + return _setattr(attr_name, value_var, has_on_setattr) + + return "_inst_dict['%s'] = %s" % (attr_name, value_var) + + def fmt_setter_with_converter( + attr_name, value_var, has_on_setattr + ): + if has_on_setattr or _is_slot_attr(attr_name, base_attr_map): + return _setattr_with_converter( + attr_name, value_var, has_on_setattr + ) + + return "_inst_dict['%s'] = %s(%s)" % ( + attr_name, + _init_converter_pat % (attr_name,), + value_var, + ) + + else: + # Not frozen. + fmt_setter = _assign + fmt_setter_with_converter = _assign_with_converter + + args = [] + kw_only_args = [] + attrs_to_validate = [] + + # This is a dictionary of names to validator and converter callables. + # Injecting this into __init__ globals lets us avoid lookups. 
+ names_for_globals = {} + annotations = {"return": None} + + for a in attrs: + if a.validator: + attrs_to_validate.append(a) + + attr_name = a.name + has_on_setattr = a.on_setattr is not None or ( + a.on_setattr is not setters.NO_OP and has_cls_on_setattr + ) + arg_name = a.name.lstrip("_") + + has_factory = isinstance(a.default, Factory) + if has_factory and a.default.takes_self: + maybe_self = "self" + else: + maybe_self = "" + + if a.init is False: + if has_factory: + init_factory_name = _init_factory_pat.format(a.name) + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + init_factory_name + "(%s)" % (maybe_self,), + has_on_setattr, + ) + ) + conv_name = _init_converter_pat % (a.name,) + names_for_globals[conv_name] = a.converter + else: + lines.append( + fmt_setter( + attr_name, + init_factory_name + "(%s)" % (maybe_self,), + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, + "attr_dict['%s'].default" % (attr_name,), + has_on_setattr, + ) + ) + conv_name = _init_converter_pat % (a.name,) + names_for_globals[conv_name] = a.converter + else: + lines.append( + fmt_setter( + attr_name, + "attr_dict['%s'].default" % (attr_name,), + has_on_setattr, + ) + ) + elif a.default is not NOTHING and not has_factory: + arg = "%s=attr_dict['%s'].default" % (arg_name, attr_name) + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + elif has_factory: + arg = "%s=NOTHING" % (arg_name,) + if a.kw_only: + kw_only_args.append(arg) + else: + args.append(arg) + lines.append("if %s is not NOTHING:" % (arg_name,)) + + init_factory_name = _init_factory_pat.format(a.name) + if a.converter is not None: + lines.append( + " " + + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter_with_converter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append( + " " + fmt_setter(attr_name, arg_name, has_on_setattr) + ) + lines.append("else:") + lines.append( + " " + + fmt_setter( + attr_name, + init_factory_name + "(" + maybe_self + ")", + has_on_setattr, + ) + ) + names_for_globals[init_factory_name] = a.default.factory + else: + if a.kw_only: + kw_only_args.append(arg_name) + else: + args.append(arg_name) + + if a.converter is not None: + lines.append( + fmt_setter_with_converter( + attr_name, arg_name, has_on_setattr + ) + ) + names_for_globals[ + _init_converter_pat % (a.name,) + ] = a.converter + else: + lines.append(fmt_setter(attr_name, arg_name, has_on_setattr)) + + if a.init is True: + if a.type is not None and a.converter is None: + annotations[arg_name] = a.type + elif a.converter is not None: + # Try to get the type from the converter. + t = _AnnotationExtractor(a.converter).get_first_param_type() + if t: + annotations[arg_name] = t + + if attrs_to_validate: # we can skip this if there are no validators. 
+ names_for_globals["_config"] = _config + lines.append("if _config._run_validators is True:") + for a in attrs_to_validate: + val_name = "__attr_validator_" + a.name + attr_name = "__attr_" + a.name + lines.append( + " %s(self, %s, self.%s)" % (val_name, attr_name, a.name) + ) + names_for_globals[val_name] = a.validator + names_for_globals[attr_name] = a + + if post_init: + lines.append("self.__attrs_post_init__()") + + # because this is set only after __attrs_post_init__ is called, a crash + # will result if post-init tries to access the hash code. This seemed + # preferable to setting this beforehand, in which case alteration to + # field values during post-init combined with post-init accessing the + # hash code would result in silent bugs. + if cache_hash: + if frozen: + if slots: + # if frozen and slots, then _setattr defined above + init_hash_cache = "_setattr(self, '%s', %s)" + else: + # if frozen and not slots, then _inst_dict defined above + init_hash_cache = "_inst_dict['%s'] = %s" + else: + init_hash_cache = "self.%s = %s" + lines.append(init_hash_cache % (_hash_cache_field, "None")) + + # For exceptions we rely on BaseException.__init__ for proper + # initialization. + if is_exc: + vals = ",".join("self." + a.name for a in attrs if a.init) + + lines.append("BaseException.__init__(self, %s)" % (vals,)) + + args = ", ".join(args) + if kw_only_args: + args += "%s*, %s" % ( + ", " if args else "", # leading comma + ", ".join(kw_only_args), # kw_only args + ) + return ( + """\ +def {init_name}(self, {args}): + {lines} +""".format( + init_name=("__attrs_init__" if attrs_init else "__init__"), + args=args, + lines="\n ".join(lines) if lines else "pass", + ), + names_for_globals, + annotations, + ) + + +class Attribute: + """ + *Read-only* representation of an attribute. + + The class has *all* arguments of `attr.ib` (except for ``factory`` + which is only syntactic sugar for ``default=Factory(...)`` plus the + following: + + - ``name`` (`str`): The name of the attribute. + - ``inherited`` (`bool`): Whether or not that attribute has been inherited + from a base class. + - ``eq_key`` and ``order_key`` (`typing.Callable` or `None`): The callables + that are used for comparing and ordering objects by this attribute, + respectively. These are set by passing a callable to `attr.ib`'s ``eq``, + ``order``, or ``cmp`` arguments. See also :ref:`comparison customization + `. + + Instances of this class are frequently used for introspection purposes + like: + + - `fields` returns a tuple of them. + - Validators get them passed as the first argument. + - The :ref:`field transformer ` hook receives a list of + them. + + .. versionadded:: 20.1.0 *inherited* + .. versionadded:: 20.1.0 *on_setattr* + .. versionchanged:: 20.2.0 *inherited* is not taken into account for + equality checks and hashing anymore. + .. versionadded:: 21.1.0 *eq_key* and *order_key* + + For the full version history of the fields, see `attr.ib`. + """ + + __slots__ = ( + "name", + "default", + "validator", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "type", + "converter", + "kw_only", + "inherited", + "on_setattr", + ) + + def __init__( + self, + name, + default, + validator, + repr, + cmp, # XXX: unused, remove along with other cmp code. 
+ hash, + init, + inherited, + metadata=None, + type=None, + converter=None, + kw_only=False, + eq=None, + eq_key=None, + order=None, + order_key=None, + on_setattr=None, + ): + eq, eq_key, order, order_key = _determine_attrib_eq_order( + cmp, eq_key or eq, order_key or order, True + ) + + # Cache this descriptor here to speed things up later. + bound_setattr = _obj_setattr.__get__(self, Attribute) + + # Despite the big red warning, people *do* instantiate `Attribute` + # themselves. + bound_setattr("name", name) + bound_setattr("default", default) + bound_setattr("validator", validator) + bound_setattr("repr", repr) + bound_setattr("eq", eq) + bound_setattr("eq_key", eq_key) + bound_setattr("order", order) + bound_setattr("order_key", order_key) + bound_setattr("hash", hash) + bound_setattr("init", init) + bound_setattr("converter", converter) + bound_setattr( + "metadata", + ( + types.MappingProxyType(dict(metadata)) # Shallow copy + if metadata + else _empty_metadata_singleton + ), + ) + bound_setattr("type", type) + bound_setattr("kw_only", kw_only) + bound_setattr("inherited", inherited) + bound_setattr("on_setattr", on_setattr) + + def __setattr__(self, name, value): + raise FrozenInstanceError() + + @classmethod + def from_counting_attr(cls, name, ca, type=None): + # type holds the annotated value. deal with conflicts: + if type is None: + type = ca.type + elif ca.type is not None: + raise ValueError( + "Type annotation and type argument cannot both be present" + ) + inst_dict = { + k: getattr(ca, k) + for k in Attribute.__slots__ + if k + not in ( + "name", + "validator", + "default", + "type", + "inherited", + ) # exclude methods and deprecated alias + } + return cls( + name=name, + validator=ca._validator, + default=ca._default, + type=type, + cmp=None, + inherited=False, + **inst_dict + ) + + # Don't use attr.evolve since fields(Attribute) doesn't work + def evolve(self, **changes): + """ + Copy *self* and apply *changes*. + + This works similarly to `attr.evolve` but that function does not work + with ``Attribute``. + + It is mainly meant to be used for `transform-fields`. + + .. versionadded:: 20.3.0 + """ + new = copy.copy(self) + + new._setattrs(changes.items()) + + return new + + # Don't use _add_pickle since fields(Attribute) doesn't work + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple( + getattr(self, name) if name != "metadata" else dict(self.metadata) + for name in self.__slots__ + ) + + def __setstate__(self, state): + """ + Play nice with pickle. + """ + self._setattrs(zip(self.__slots__, state)) + + def _setattrs(self, name_values_pairs): + bound_setattr = _obj_setattr.__get__(self, Attribute) + for name, value in name_values_pairs: + if name != "metadata": + bound_setattr(name, value) + else: + bound_setattr( + name, + types.MappingProxyType(dict(value)) + if value + else _empty_metadata_singleton, + ) + + +_a = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=(name != "metadata"), + init=True, + inherited=False, + ) + for name in Attribute.__slots__ +] + +Attribute = _add_hash( + _add_eq( + _add_repr(Attribute, attrs=_a), + attrs=[a for a in _a if a.name != "inherited"], + ), + attrs=[a for a in _a if a.hash and a.name != "inherited"], +) + + +class _CountingAttr: + """ + Intermediate representation of attributes that uses a counter to preserve + the order in which the attributes have been defined. + + *Internal* data structure of the attrs library. 
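``Attribute.evolve`` is mainly meant for field transformers; a sketch of that use, assuming the public ``attr`` import and a hypothetical transformer:

    import attr

    def stringify(cls, fields):
        # Attribute instances are read-only, so evolve() returns changed copies.
        return [f.evolve(converter=str) if f.type is str else f for f in fields]

    @attr.s(auto_attribs=True, field_transformer=stringify)
    class Row:
        name: str
        count: int = 0

    assert Row(name=42).name == "42"
    assert Row(name="x", count=3).count == 3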
Running into is most + likely the result of a bug like a forgotten `@attr.s` decorator. + """ + + __slots__ = ( + "counter", + "_default", + "repr", + "eq", + "eq_key", + "order", + "order_key", + "hash", + "init", + "metadata", + "_validator", + "converter", + "type", + "kw_only", + "on_setattr", + ) + __attrs_attrs__ = tuple( + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + hash=True, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ) + for name in ( + "counter", + "_default", + "repr", + "eq", + "order", + "hash", + "init", + "on_setattr", + ) + ) + ( + Attribute( + name="metadata", + default=None, + validator=None, + repr=True, + cmp=None, + hash=False, + init=True, + kw_only=False, + eq=True, + eq_key=None, + order=False, + order_key=None, + inherited=False, + on_setattr=None, + ), + ) + cls_counter = 0 + + def __init__( + self, + default, + validator, + repr, + cmp, + hash, + init, + converter, + metadata, + type, + kw_only, + eq, + eq_key, + order, + order_key, + on_setattr, + ): + _CountingAttr.cls_counter += 1 + self.counter = _CountingAttr.cls_counter + self._default = default + self._validator = validator + self.converter = converter + self.repr = repr + self.eq = eq + self.eq_key = eq_key + self.order = order + self.order_key = order_key + self.hash = hash + self.init = init + self.metadata = metadata + self.type = type + self.kw_only = kw_only + self.on_setattr = on_setattr + + def validator(self, meth): + """ + Decorator that adds *meth* to the list of validators. + + Returns *meth* unchanged. + + .. versionadded:: 17.1.0 + """ + if self._validator is None: + self._validator = meth + else: + self._validator = and_(self._validator, meth) + return meth + + def default(self, meth): + """ + Decorator that allows to set the default for an attribute. + + Returns *meth* unchanged. + + :raises DefaultAlreadySetError: If default has been set before. + + .. versionadded:: 17.1.0 + """ + if self._default is not NOTHING: + raise DefaultAlreadySetError() + + self._default = Factory(meth, takes_self=True) + + return meth + + +_CountingAttr = _add_eq(_add_repr(_CountingAttr)) + + +class Factory: + """ + Stores a factory callable. + + If passed as the default value to `attrs.field`, the factory is used to + generate a new value. + + :param callable factory: A callable that takes either none or exactly one + mandatory positional argument depending on *takes_self*. + :param bool takes_self: Pass the partially initialized instance that is + being initialized as a positional argument. + + .. versionadded:: 17.1.0 *takes_self* + """ + + __slots__ = ("factory", "takes_self") + + def __init__(self, factory, takes_self=False): + """ + `Factory` is part of the default machinery so if we want a default + value here, we have to implement it ourselves. + """ + self.factory = factory + self.takes_self = takes_self + + def __getstate__(self): + """ + Play nice with pickle. + """ + return tuple(getattr(self, name) for name in self.__slots__) + + def __setstate__(self, state): + """ + Play nice with pickle. 
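The ``validator`` and ``default`` decorators on ``_CountingAttr`` above are what power the ``@field.validator`` / ``@field.default`` idiom; a short sketch, assuming the public ``attr`` import and a hypothetical ``Job`` class:

    import attr

    @attr.s
    class Job:
        retries = attr.ib()
        timeout = attr.ib()

        @retries.validator
        def _check_retries(self, attribute, value):
            if value < 0:
                raise ValueError("retries must be >= 0")

        @timeout.default
        def _default_timeout(self):
            # default() wraps this method in Factory(..., takes_self=True)
            return 10 * (self.retries + 1)

    assert Job(retries=2).timeout == 30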
+ """ + for name, value in zip(self.__slots__, state): + setattr(self, name, value) + + +_f = [ + Attribute( + name=name, + default=NOTHING, + validator=None, + repr=True, + cmp=None, + eq=True, + order=False, + hash=True, + init=True, + inherited=False, + ) + for name in Factory.__slots__ +] + +Factory = _add_hash(_add_eq(_add_repr(Factory, attrs=_f), attrs=_f), attrs=_f) + + +def make_class(name, attrs, bases=(object,), **attributes_arguments): + """ + A quick way to create a new class called *name* with *attrs*. + + :param str name: The name for the new class. + + :param attrs: A list of names or a dictionary of mappings of names to + attributes. + + If *attrs* is a list or an ordered dict (`dict` on Python 3.6+, + `collections.OrderedDict` otherwise), the order is deduced from + the order of the names or attributes inside *attrs*. Otherwise the + order of the definition of the attributes is used. + :type attrs: `list` or `dict` + + :param tuple bases: Classes that the new class will subclass. + + :param attributes_arguments: Passed unmodified to `attr.s`. + + :return: A new class with *attrs*. + :rtype: type + + .. versionadded:: 17.1.0 *bases* + .. versionchanged:: 18.1.0 If *attrs* is ordered, the order is retained. + """ + if isinstance(attrs, dict): + cls_dict = attrs + elif isinstance(attrs, (list, tuple)): + cls_dict = {a: attrib() for a in attrs} + else: + raise TypeError("attrs argument must be a dict or a list.") + + pre_init = cls_dict.pop("__attrs_pre_init__", None) + post_init = cls_dict.pop("__attrs_post_init__", None) + user_init = cls_dict.pop("__init__", None) + + body = {} + if pre_init is not None: + body["__attrs_pre_init__"] = pre_init + if post_init is not None: + body["__attrs_post_init__"] = post_init + if user_init is not None: + body["__init__"] = user_init + + type_ = types.new_class(name, bases, {}, lambda ns: ns.update(body)) + + # For pickling to work, the __module__ variable needs to be set to the + # frame where the class is created. Bypass this step in environments where + # sys._getframe is not defined (Jython for example) or sys._getframe is not + # defined for arguments greater than 0 (IronPython). + try: + type_.__module__ = sys._getframe(1).f_globals.get( + "__name__", "__main__" + ) + except (AttributeError, ValueError): + pass + + # We do it here for proper warnings with meaningful stacklevel. + cmp = attributes_arguments.pop("cmp", None) + ( + attributes_arguments["eq"], + attributes_arguments["order"], + ) = _determine_attrs_eq_order( + cmp, + attributes_arguments.get("eq"), + attributes_arguments.get("order"), + True, + ) + + return _attrs(these=cls_dict, **attributes_arguments)(type_) + + +# These are required by within this module so we define them here and merely +# import into .validators / .converters. + + +@attrs(slots=True, hash=True) +class _AndValidator: + """ + Compose many validators to a single one. + """ + + _validators = attrib() + + def __call__(self, inst, attr, value): + for v in self._validators: + v(inst, attr, value) + + +def and_(*validators): + """ + A validator that composes multiple validators into one. + + When called on a value, it runs all wrapped validators. + + :param callables validators: Arbitrary number of validators. + + .. 
versionadded:: 17.1.0 + """ + vals = [] + for validator in validators: + vals.extend( + validator._validators + if isinstance(validator, _AndValidator) + else [validator] + ) + + return _AndValidator(tuple(vals)) + + +def pipe(*converters): + """ + A converter that composes multiple converters into one. + + When called on a value, it runs all wrapped converters, returning the + *last* value. + + Type annotations will be inferred from the wrapped converters', if + they have any. + + :param callables converters: Arbitrary number of converters. + + .. versionadded:: 20.1.0 + """ + + def pipe_converter(val): + for converter in converters: + val = converter(val) + + return val + + if not converters: + # If the converter list is empty, pipe_converter is the identity. + A = typing.TypeVar("A") + pipe_converter.__annotations__ = {"val": A, "return": A} + else: + # Get parameter type from first converter. + t = _AnnotationExtractor(converters[0]).get_first_param_type() + if t: + pipe_converter.__annotations__["val"] = t + + # Get return type from last converter. + rt = _AnnotationExtractor(converters[-1]).get_return_type() + if rt: + pipe_converter.__annotations__["return"] = rt + + return pipe_converter diff --git a/src/poetry/core/_vendor/attr/_next_gen.py b/src/poetry/core/_vendor/attr/_next_gen.py new file mode 100644 index 0000000..5a06a74 --- /dev/null +++ b/src/poetry/core/_vendor/attr/_next_gen.py @@ -0,0 +1,220 @@ +# SPDX-License-Identifier: MIT + +""" +These are Python 3.6+-only and keyword-only APIs that call `attr.s` and +`attr.ib` with different default values. +""" + + +from functools import partial + +from . import setters +from ._funcs import asdict as _asdict +from ._funcs import astuple as _astuple +from ._make import ( + NOTHING, + _frozen_setattrs, + _ng_default_on_setattr, + attrib, + attrs, +) +from .exceptions import UnannotatedAttributeError + + +def define( + maybe_cls=None, + *, + these=None, + repr=None, + hash=None, + init=None, + slots=True, + frozen=False, + weakref_slot=True, + str=False, + auto_attribs=None, + kw_only=False, + cache_hash=False, + auto_exc=True, + eq=None, + order=False, + auto_detect=True, + getstate_setstate=None, + on_setattr=None, + field_transformer=None, + match_args=True, +): + r""" + Define an ``attrs`` class. + + Differences to the classic `attr.s` that it uses underneath: + + - Automatically detect whether or not *auto_attribs* should be `True` (c.f. + *auto_attribs* parameter). + - If *frozen* is `False`, run converters and validators when setting an + attribute by default. + - *slots=True* + + .. caution:: + + Usually this has only upsides and few visible effects in everyday + programming. But it *can* lead to some suprising behaviors, so please + make sure to read :term:`slotted classes`. + - *auto_exc=True* + - *auto_detect=True* + - *order=False* + - Some options that were only relevant on Python 2 or were kept around for + backwards-compatibility have been removed. + + Please note that these are all defaults and you can change them as you + wish. + + :param Optional[bool] auto_attribs: If set to `True` or `False`, it behaves + exactly like `attr.s`. If left `None`, `attr.s` will try to guess: + + 1. If any attributes are annotated and no unannotated `attrs.fields`\ s + are found, it assumes *auto_attribs=True*. + 2. Otherwise it assumes *auto_attribs=False* and tries to collect + `attrs.fields`\ s. + + For now, please refer to `attr.s` for the rest of the parameters. + + .. versionadded:: 20.1.0 + .. 
versionchanged:: 21.3.0 Converters are also run ``on_setattr``. + """ + + def do_it(cls, auto_attribs): + return attrs( + maybe_cls=cls, + these=these, + repr=repr, + hash=hash, + init=init, + slots=slots, + frozen=frozen, + weakref_slot=weakref_slot, + str=str, + auto_attribs=auto_attribs, + kw_only=kw_only, + cache_hash=cache_hash, + auto_exc=auto_exc, + eq=eq, + order=order, + auto_detect=auto_detect, + collect_by_mro=True, + getstate_setstate=getstate_setstate, + on_setattr=on_setattr, + field_transformer=field_transformer, + match_args=match_args, + ) + + def wrap(cls): + """ + Making this a wrapper ensures this code runs during class creation. + + We also ensure that frozen-ness of classes is inherited. + """ + nonlocal frozen, on_setattr + + had_on_setattr = on_setattr not in (None, setters.NO_OP) + + # By default, mutable classes convert & validate on setattr. + if frozen is False and on_setattr is None: + on_setattr = _ng_default_on_setattr + + # However, if we subclass a frozen class, we inherit the immutability + # and disable on_setattr. + for base_cls in cls.__bases__: + if base_cls.__setattr__ is _frozen_setattrs: + if had_on_setattr: + raise ValueError( + "Frozen classes can't use on_setattr " + "(frozen-ness was inherited)." + ) + + on_setattr = setters.NO_OP + break + + if auto_attribs is not None: + return do_it(cls, auto_attribs) + + try: + return do_it(cls, True) + except UnannotatedAttributeError: + return do_it(cls, False) + + # maybe_cls's type depends on the usage of the decorator. It's a class + # if it's used as `@attrs` but ``None`` if used as `@attrs()`. + if maybe_cls is None: + return wrap + else: + return wrap(maybe_cls) + + +mutable = define +frozen = partial(define, frozen=True, on_setattr=None) + + +def field( + *, + default=NOTHING, + validator=None, + repr=True, + hash=None, + init=True, + metadata=None, + converter=None, + factory=None, + kw_only=False, + eq=None, + order=None, + on_setattr=None, +): + """ + Identical to `attr.ib`, except keyword-only and with some arguments + removed. + + .. versionadded:: 20.1.0 + """ + return attrib( + default=default, + validator=validator, + repr=repr, + hash=hash, + init=init, + metadata=metadata, + converter=converter, + factory=factory, + kw_only=kw_only, + eq=eq, + order=order, + on_setattr=on_setattr, + ) + + +def asdict(inst, *, recurse=True, filter=None, value_serializer=None): + """ + Same as `attr.asdict`, except that collections types are always retained + and dict is always used as *dict_factory*. + + .. versionadded:: 21.3.0 + """ + return _asdict( + inst=inst, + recurse=recurse, + filter=filter, + value_serializer=value_serializer, + retain_collection_types=True, + ) + + +def astuple(inst, *, recurse=True, filter=None): + """ + Same as `attr.astuple`, except that collections types are always retained + and `tuple` is always used as the *tuple_factory*. + + .. 
versionadded:: 21.3.0 + """ + return _astuple( + inst=inst, recurse=recurse, filter=filter, retain_collection_types=True + ) diff --git a/src/poetry/core/_vendor/attr/_version_info.py b/src/poetry/core/_vendor/attr/_version_info.py new file mode 100644 index 0000000..51a1312 --- /dev/null +++ b/src/poetry/core/_vendor/attr/_version_info.py @@ -0,0 +1,86 @@ +# SPDX-License-Identifier: MIT + + +from functools import total_ordering + +from ._funcs import astuple +from ._make import attrib, attrs + + +@total_ordering +@attrs(eq=False, order=False, slots=True, frozen=True) +class VersionInfo: + """ + A version object that can be compared to tuple of length 1--4: + + >>> attr.VersionInfo(19, 1, 0, "final") <= (19, 2) + True + >>> attr.VersionInfo(19, 1, 0, "final") < (19, 1, 1) + True + >>> vi = attr.VersionInfo(19, 2, 0, "final") + >>> vi < (19, 1, 1) + False + >>> vi < (19,) + False + >>> vi == (19, 2,) + True + >>> vi == (19, 2, 1) + False + + .. versionadded:: 19.2 + """ + + year = attrib(type=int) + minor = attrib(type=int) + micro = attrib(type=int) + releaselevel = attrib(type=str) + + @classmethod + def _from_version_string(cls, s): + """ + Parse *s* and return a _VersionInfo. + """ + v = s.split(".") + if len(v) == 3: + v.append("final") + + return cls( + year=int(v[0]), minor=int(v[1]), micro=int(v[2]), releaselevel=v[3] + ) + + def _ensure_tuple(self, other): + """ + Ensure *other* is a tuple of a valid length. + + Returns a possibly transformed *other* and ourselves as a tuple of + the same length as *other*. + """ + + if self.__class__ is other.__class__: + other = astuple(other) + + if not isinstance(other, tuple): + raise NotImplementedError + + if not (1 <= len(other) <= 4): + raise NotImplementedError + + return astuple(self)[: len(other)], other + + def __eq__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + return us == them + + def __lt__(self, other): + try: + us, them = self._ensure_tuple(other) + except NotImplementedError: + return NotImplemented + + # Since alphabetically "dev0" < "final" < "post1" < "post2", we don't + # have to do anything special with releaselevel for now. + return us < them diff --git a/src/poetry/core/_vendor/attr/converters.py b/src/poetry/core/_vendor/attr/converters.py new file mode 100644 index 0000000..a73626c --- /dev/null +++ b/src/poetry/core/_vendor/attr/converters.py @@ -0,0 +1,144 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful converters. +""" + + +import typing + +from ._compat import _AnnotationExtractor +from ._make import NOTHING, Factory, pipe + + +__all__ = [ + "default_if_none", + "optional", + "pipe", + "to_bool", +] + + +def optional(converter): + """ + A converter that allows an attribute to be optional. An optional attribute + is one which can be set to ``None``. + + Type annotations will be inferred from the wrapped converter's, if it + has any. + + :param callable converter: the converter that is used for non-``None`` + values. + + .. 
versionadded:: 17.1.0 + """ + + def optional_converter(val): + if val is None: + return None + return converter(val) + + xtr = _AnnotationExtractor(converter) + + t = xtr.get_first_param_type() + if t: + optional_converter.__annotations__["val"] = typing.Optional[t] + + rt = xtr.get_return_type() + if rt: + optional_converter.__annotations__["return"] = typing.Optional[rt] + + return optional_converter + + +def default_if_none(default=NOTHING, factory=None): + """ + A converter that allows to replace ``None`` values by *default* or the + result of *factory*. + + :param default: Value to be used if ``None`` is passed. Passing an instance + of `attrs.Factory` is supported, however the ``takes_self`` option + is *not*. + :param callable factory: A callable that takes no parameters whose result + is used if ``None`` is passed. + + :raises TypeError: If **neither** *default* or *factory* is passed. + :raises TypeError: If **both** *default* and *factory* are passed. + :raises ValueError: If an instance of `attrs.Factory` is passed with + ``takes_self=True``. + + .. versionadded:: 18.2.0 + """ + if default is NOTHING and factory is None: + raise TypeError("Must pass either `default` or `factory`.") + + if default is not NOTHING and factory is not None: + raise TypeError( + "Must pass either `default` or `factory` but not both." + ) + + if factory is not None: + default = Factory(factory) + + if isinstance(default, Factory): + if default.takes_self: + raise ValueError( + "`takes_self` is not supported by default_if_none." + ) + + def default_if_none_converter(val): + if val is not None: + return val + + return default.factory() + + else: + + def default_if_none_converter(val): + if val is not None: + return val + + return default + + return default_if_none_converter + + +def to_bool(val): + """ + Convert "boolean" strings (e.g., from env. vars.) to real booleans. + + Values mapping to :code:`True`: + + - :code:`True` + - :code:`"true"` / :code:`"t"` + - :code:`"yes"` / :code:`"y"` + - :code:`"on"` + - :code:`"1"` + - :code:`1` + + Values mapping to :code:`False`: + + - :code:`False` + - :code:`"false"` / :code:`"f"` + - :code:`"no"` / :code:`"n"` + - :code:`"off"` + - :code:`"0"` + - :code:`0` + + :raises ValueError: for any other value. + + .. versionadded:: 21.3.0 + """ + if isinstance(val, str): + val = val.lower() + truthy = {True, "true", "t", "yes", "y", "on", "1", 1} + falsy = {False, "false", "f", "no", "n", "off", "0", 0} + try: + if val in truthy: + return True + if val in falsy: + return False + except TypeError: + # Raised when "val" is not hashable (e.g., lists) + pass + raise ValueError("Cannot convert value to bool: {}".format(val)) diff --git a/src/poetry/core/_vendor/attr/exceptions.py b/src/poetry/core/_vendor/attr/exceptions.py new file mode 100644 index 0000000..5dc51e0 --- /dev/null +++ b/src/poetry/core/_vendor/attr/exceptions.py @@ -0,0 +1,92 @@ +# SPDX-License-Identifier: MIT + + +class FrozenError(AttributeError): + """ + A frozen/immutable instance or attribute have been attempted to be + modified. + + It mirrors the behavior of ``namedtuples`` by using the same error message + and subclassing `AttributeError`. + + .. versionadded:: 20.1.0 + """ + + msg = "can't set attribute" + args = [msg] + + +class FrozenInstanceError(FrozenError): + """ + A frozen instance has been attempted to be modified. + + .. versionadded:: 16.1.0 + """ + + +class FrozenAttributeError(FrozenError): + """ + A frozen attribute has been attempted to be modified. + + .. 
versionadded:: 20.1.0 + """ + + +class AttrsAttributeNotFoundError(ValueError): + """ + An ``attrs`` function couldn't find an attribute that the user asked for. + + .. versionadded:: 16.2.0 + """ + + +class NotAnAttrsClassError(ValueError): + """ + A non-``attrs`` class has been passed into an ``attrs`` function. + + .. versionadded:: 16.2.0 + """ + + +class DefaultAlreadySetError(RuntimeError): + """ + A default has been set using ``attr.ib()`` and is attempted to be reset + using the decorator. + + .. versionadded:: 17.1.0 + """ + + +class UnannotatedAttributeError(RuntimeError): + """ + A class with ``auto_attribs=True`` has an ``attr.ib()`` without a type + annotation. + + .. versionadded:: 17.3.0 + """ + + +class PythonTooOldError(RuntimeError): + """ + It was attempted to use an ``attrs`` feature that requires a newer Python + version. + + .. versionadded:: 18.2.0 + """ + + +class NotCallableError(TypeError): + """ + A ``attr.ib()`` requiring a callable has been set with a value + that is not callable. + + .. versionadded:: 19.2.0 + """ + + def __init__(self, msg, value): + super(TypeError, self).__init__(msg, value) + self.msg = msg + self.value = value + + def __str__(self): + return str(self.msg) diff --git a/src/poetry/core/_vendor/attr/filters.py b/src/poetry/core/_vendor/attr/filters.py new file mode 100644 index 0000000..baa25e9 --- /dev/null +++ b/src/poetry/core/_vendor/attr/filters.py @@ -0,0 +1,51 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful filters for `attr.asdict`. +""" + +from ._make import Attribute + + +def _split_what(what): + """ + Returns a tuple of `frozenset`s of classes and attributes. + """ + return ( + frozenset(cls for cls in what if isinstance(cls, type)), + frozenset(cls for cls in what if isinstance(cls, Attribute)), + ) + + +def include(*what): + """ + Include *what*. + + :param what: What to include. + :type what: `list` of `type` or `attrs.Attribute`\\ s + + :rtype: `callable` + """ + cls, attrs = _split_what(what) + + def include_(attribute, value): + return value.__class__ in cls or attribute in attrs + + return include_ + + +def exclude(*what): + """ + Exclude *what*. + + :param what: What to exclude. + :type what: `list` of classes or `attrs.Attribute`\\ s. + + :rtype: `callable` + """ + cls, attrs = _split_what(what) + + def exclude_(attribute, value): + return value.__class__ not in cls and attribute not in attrs + + return exclude_ diff --git a/src/poetry/core/_vendor/attr/py.typed b/src/poetry/core/_vendor/attr/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/_vendor/attr/setters.py b/src/poetry/core/_vendor/attr/setters.py new file mode 100644 index 0000000..12ed675 --- /dev/null +++ b/src/poetry/core/_vendor/attr/setters.py @@ -0,0 +1,73 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly used hooks for on_setattr. +""" + + +from . import _config +from .exceptions import FrozenAttributeError + + +def pipe(*setters): + """ + Run all *setters* and return the return value of the last one. + + .. versionadded:: 20.1.0 + """ + + def wrapped_pipe(instance, attrib, new_value): + rv = new_value + + for setter in setters: + rv = setter(instance, attrib, rv) + + return rv + + return wrapped_pipe + + +def frozen(_, __, ___): + """ + Prevent an attribute to be modified. + + .. versionadded:: 20.1.0 + """ + raise FrozenAttributeError() + + +def validate(instance, attrib, new_value): + """ + Run *attrib*'s validator on *new_value* if it has one. + + .. 
versionadded:: 20.1.0 + """ + if _config._run_validators is False: + return new_value + + v = attrib.validator + if not v: + return new_value + + v(instance, attrib, new_value) + + return new_value + + +def convert(instance, attrib, new_value): + """ + Run *attrib*'s converter -- if it has one -- on *new_value* and return the + result. + + .. versionadded:: 20.1.0 + """ + c = attrib.converter + if c: + return c(new_value) + + return new_value + + +# Sentinel for disabling class-wide *on_setattr* hooks for certain attributes. +# autodata stopped working, so the docstring is inlined in the API docs. +NO_OP = object() diff --git a/src/poetry/core/_vendor/attr/validators.py b/src/poetry/core/_vendor/attr/validators.py new file mode 100644 index 0000000..eece517 --- /dev/null +++ b/src/poetry/core/_vendor/attr/validators.py @@ -0,0 +1,594 @@ +# SPDX-License-Identifier: MIT + +""" +Commonly useful validators. +""" + + +import operator +import re + +from contextlib import contextmanager + +from ._config import get_run_validators, set_run_validators +from ._make import _AndValidator, and_, attrib, attrs +from .exceptions import NotCallableError + + +try: + Pattern = re.Pattern +except AttributeError: # Python <3.7 lacks a Pattern type. + Pattern = type(re.compile("")) + + +__all__ = [ + "and_", + "deep_iterable", + "deep_mapping", + "disabled", + "ge", + "get_disabled", + "gt", + "in_", + "instance_of", + "is_callable", + "le", + "lt", + "matches_re", + "max_len", + "min_len", + "optional", + "provides", + "set_disabled", +] + + +def set_disabled(disabled): + """ + Globally disable or enable running validators. + + By default, they are run. + + :param disabled: If ``True``, disable running all validators. + :type disabled: bool + + .. warning:: + + This function is not thread-safe! + + .. versionadded:: 21.3.0 + """ + set_run_validators(not disabled) + + +def get_disabled(): + """ + Return a bool indicating whether validators are currently disabled or not. + + :return: ``True`` if validators are currently disabled. + :rtype: bool + + .. versionadded:: 21.3.0 + """ + return not get_run_validators() + + +@contextmanager +def disabled(): + """ + Context manager that disables running validators within its context. + + .. warning:: + + This context manager is not thread-safe! + + .. versionadded:: 21.3.0 + """ + set_run_validators(False) + try: + yield + finally: + set_run_validators(True) + + +@attrs(repr=False, slots=True, hash=True) +class _InstanceOfValidator: + type = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not isinstance(value, self.type): + raise TypeError( + "'{name}' must be {type!r} (got {value!r} that is a " + "{actual!r}).".format( + name=attr.name, + type=self.type, + actual=value.__class__, + value=value, + ), + attr, + self.type, + value, + ) + + def __repr__(self): + return "".format( + type=self.type + ) + + +def instance_of(type): + """ + A validator that raises a `TypeError` if the initializer is called + with a wrong type for this particular attribute (checks are performed using + `isinstance` therefore it's also valid to pass a tuple of types). + + :param type: The type to check for. + :type type: type or tuple of types + + :raises TypeError: With a human readable error message, the attribute + (of type `attrs.Attribute`), the expected type, and the value it + got. 
+ """ + return _InstanceOfValidator(type) + + +@attrs(repr=False, frozen=True, slots=True) +class _MatchesReValidator: + pattern = attrib() + match_func = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not self.match_func(value): + raise ValueError( + "'{name}' must match regex {pattern!r}" + " ({value!r} doesn't)".format( + name=attr.name, pattern=self.pattern.pattern, value=value + ), + attr, + self.pattern, + value, + ) + + def __repr__(self): + return "".format( + pattern=self.pattern + ) + + +def matches_re(regex, flags=0, func=None): + r""" + A validator that raises `ValueError` if the initializer is called + with a string that doesn't match *regex*. + + :param regex: a regex string or precompiled pattern to match against + :param int flags: flags that will be passed to the underlying re function + (default 0) + :param callable func: which underlying `re` function to call. Valid options + are `re.fullmatch`, `re.search`, and `re.match`; the default ``None`` + means `re.fullmatch`. For performance reasons, the pattern is always + precompiled using `re.compile`. + + .. versionadded:: 19.2.0 + .. versionchanged:: 21.3.0 *regex* can be a pre-compiled pattern. + """ + valid_funcs = (re.fullmatch, None, re.search, re.match) + if func not in valid_funcs: + raise ValueError( + "'func' must be one of {}.".format( + ", ".join( + sorted( + e and e.__name__ or "None" for e in set(valid_funcs) + ) + ) + ) + ) + + if isinstance(regex, Pattern): + if flags: + raise TypeError( + "'flags' can only be used with a string pattern; " + "pass flags to re.compile() instead" + ) + pattern = regex + else: + pattern = re.compile(regex, flags) + + if func is re.match: + match_func = pattern.match + elif func is re.search: + match_func = pattern.search + else: + match_func = pattern.fullmatch + + return _MatchesReValidator(pattern, match_func) + + +@attrs(repr=False, slots=True, hash=True) +class _ProvidesValidator: + interface = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not self.interface.providedBy(value): + raise TypeError( + "'{name}' must provide {interface!r} which {value!r} " + "doesn't.".format( + name=attr.name, interface=self.interface, value=value + ), + attr, + self.interface, + value, + ) + + def __repr__(self): + return "".format( + interface=self.interface + ) + + +def provides(interface): + """ + A validator that raises a `TypeError` if the initializer is called + with an object that does not provide the requested *interface* (checks are + performed using ``interface.providedBy(value)`` (see `zope.interface + `_). + + :param interface: The interface to check for. + :type interface: ``zope.interface.Interface`` + + :raises TypeError: With a human readable error message, the attribute + (of type `attrs.Attribute`), the expected interface, and the + value it got. + """ + return _ProvidesValidator(interface) + + +@attrs(repr=False, slots=True, hash=True) +class _OptionalValidator: + validator = attrib() + + def __call__(self, inst, attr, value): + if value is None: + return + + self.validator(inst, attr, value) + + def __repr__(self): + return "".format( + what=repr(self.validator) + ) + + +def optional(validator): + """ + A validator that makes an attribute optional. An optional attribute is one + which can be set to ``None`` in addition to satisfying the requirements of + the sub-validator. 
+ + :param validator: A validator (or a list of validators) that is used for + non-``None`` values. + :type validator: callable or `list` of callables. + + .. versionadded:: 15.1.0 + .. versionchanged:: 17.1.0 *validator* can be a list of validators. + """ + if isinstance(validator, list): + return _OptionalValidator(_AndValidator(validator)) + return _OptionalValidator(validator) + + +@attrs(repr=False, slots=True, hash=True) +class _InValidator: + options = attrib() + + def __call__(self, inst, attr, value): + try: + in_options = value in self.options + except TypeError: # e.g. `1 in "abc"` + in_options = False + + if not in_options: + raise ValueError( + "'{name}' must be in {options!r} (got {value!r})".format( + name=attr.name, options=self.options, value=value + ), + attr, + self.options, + value, + ) + + def __repr__(self): + return "".format( + options=self.options + ) + + +def in_(options): + """ + A validator that raises a `ValueError` if the initializer is called + with a value that does not belong in the options provided. The check is + performed using ``value in options``. + + :param options: Allowed options. + :type options: list, tuple, `enum.Enum`, ... + + :raises ValueError: With a human readable error message, the attribute (of + type `attrs.Attribute`), the expected options, and the value it + got. + + .. versionadded:: 17.1.0 + .. versionchanged:: 22.1.0 + The ValueError was incomplete until now and only contained the human + readable error message. Now it contains all the information that has + been promised since 17.1.0. + """ + return _InValidator(options) + + +@attrs(repr=False, slots=False, hash=True) +class _IsCallableValidator: + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not callable(value): + message = ( + "'{name}' must be callable " + "(got {value!r} that is a {actual!r})." + ) + raise NotCallableError( + msg=message.format( + name=attr.name, value=value, actual=value.__class__ + ), + value=value, + ) + + def __repr__(self): + return "" + + +def is_callable(): + """ + A validator that raises a `attr.exceptions.NotCallableError` if the + initializer is called with a value for this particular attribute + that is not callable. + + .. versionadded:: 19.1.0 + + :raises `attr.exceptions.NotCallableError`: With a human readable error + message containing the attribute (`attrs.Attribute`) name, + and the value it got. + """ + return _IsCallableValidator() + + +@attrs(repr=False, slots=True, hash=True) +class _DeepIterable: + member_validator = attrib(validator=is_callable()) + iterable_validator = attrib( + default=None, validator=optional(is_callable()) + ) + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if self.iterable_validator is not None: + self.iterable_validator(inst, attr, value) + + for member in value: + self.member_validator(inst, attr, member) + + def __repr__(self): + iterable_identifier = ( + "" + if self.iterable_validator is None + else " {iterable!r}".format(iterable=self.iterable_validator) + ) + return ( + "" + ).format( + iterable_identifier=iterable_identifier, + member=self.member_validator, + ) + + +def deep_iterable(member_validator, iterable_validator=None): + """ + A validator that performs deep validation of an iterable. + + :param member_validator: Validator(s) to apply to iterable members + :param iterable_validator: Validator to apply to iterable itself + (optional) + + .. 
versionadded:: 19.1.0 + + :raises TypeError: if any sub-validators fail + """ + if isinstance(member_validator, (list, tuple)): + member_validator = and_(*member_validator) + return _DeepIterable(member_validator, iterable_validator) + + +@attrs(repr=False, slots=True, hash=True) +class _DeepMapping: + key_validator = attrib(validator=is_callable()) + value_validator = attrib(validator=is_callable()) + mapping_validator = attrib(default=None, validator=optional(is_callable())) + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if self.mapping_validator is not None: + self.mapping_validator(inst, attr, value) + + for key in value: + self.key_validator(inst, attr, key) + self.value_validator(inst, attr, value[key]) + + def __repr__(self): + return ( + "" + ).format(key=self.key_validator, value=self.value_validator) + + +def deep_mapping(key_validator, value_validator, mapping_validator=None): + """ + A validator that performs deep validation of a dictionary. + + :param key_validator: Validator to apply to dictionary keys + :param value_validator: Validator to apply to dictionary values + :param mapping_validator: Validator to apply to top-level mapping + attribute (optional) + + .. versionadded:: 19.1.0 + + :raises TypeError: if any sub-validators fail + """ + return _DeepMapping(key_validator, value_validator, mapping_validator) + + +@attrs(repr=False, frozen=True, slots=True) +class _NumberValidator: + bound = attrib() + compare_op = attrib() + compare_func = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if not self.compare_func(value, self.bound): + raise ValueError( + "'{name}' must be {op} {bound}: {value}".format( + name=attr.name, + op=self.compare_op, + bound=self.bound, + value=value, + ) + ) + + def __repr__(self): + return "".format( + op=self.compare_op, bound=self.bound + ) + + +def lt(val): + """ + A validator that raises `ValueError` if the initializer is called + with a number larger or equal to *val*. + + :param val: Exclusive upper bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, "<", operator.lt) + + +def le(val): + """ + A validator that raises `ValueError` if the initializer is called + with a number greater than *val*. + + :param val: Inclusive upper bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, "<=", operator.le) + + +def ge(val): + """ + A validator that raises `ValueError` if the initializer is called + with a number smaller than *val*. + + :param val: Inclusive lower bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, ">=", operator.ge) + + +def gt(val): + """ + A validator that raises `ValueError` if the initializer is called + with a number smaller or equal to *val*. + + :param val: Exclusive lower bound for values + + .. versionadded:: 21.3.0 + """ + return _NumberValidator(val, ">", operator.gt) + + +@attrs(repr=False, frozen=True, slots=True) +class _MaxLengthValidator: + max_length = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. 
+ """ + if len(value) > self.max_length: + raise ValueError( + "Length of '{name}' must be <= {max}: {len}".format( + name=attr.name, max=self.max_length, len=len(value) + ) + ) + + def __repr__(self): + return "".format(max=self.max_length) + + +def max_len(length): + """ + A validator that raises `ValueError` if the initializer is called + with a string or iterable that is longer than *length*. + + :param int length: Maximum length of the string or iterable + + .. versionadded:: 21.3.0 + """ + return _MaxLengthValidator(length) + + +@attrs(repr=False, frozen=True, slots=True) +class _MinLengthValidator: + min_length = attrib() + + def __call__(self, inst, attr, value): + """ + We use a callable class to be able to change the ``__repr__``. + """ + if len(value) < self.min_length: + raise ValueError( + "Length of '{name}' must be => {min}: {len}".format( + name=attr.name, min=self.min_length, len=len(value) + ) + ) + + def __repr__(self): + return "".format(min=self.min_length) + + +def min_len(length): + """ + A validator that raises `ValueError` if the initializer is called + with a string or iterable that is shorter than *length*. + + :param int length: Minimum length of the string or iterable + + .. versionadded:: 22.1.0 + """ + return _MinLengthValidator(length) diff --git a/src/poetry/core/_vendor/attrs/LICENSE b/src/poetry/core/_vendor/attrs/LICENSE new file mode 100644 index 0000000..2bd6453 --- /dev/null +++ b/src/poetry/core/_vendor/attrs/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2015 Hynek Schlawack and the attrs contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/src/poetry/core/_vendor/attrs/__init__.py b/src/poetry/core/_vendor/attrs/__init__.py new file mode 100644 index 0000000..a704b8b --- /dev/null +++ b/src/poetry/core/_vendor/attrs/__init__.py @@ -0,0 +1,70 @@ +# SPDX-License-Identifier: MIT + +from attr import ( + NOTHING, + Attribute, + Factory, + __author__, + __copyright__, + __description__, + __doc__, + __email__, + __license__, + __title__, + __url__, + __version__, + __version_info__, + assoc, + cmp_using, + define, + evolve, + field, + fields, + fields_dict, + frozen, + has, + make_class, + mutable, + resolve_types, + validate, +) +from attr._next_gen import asdict, astuple + +from . 
import converters, exceptions, filters, setters, validators + + +__all__ = [ + "__author__", + "__copyright__", + "__description__", + "__doc__", + "__email__", + "__license__", + "__title__", + "__url__", + "__version__", + "__version_info__", + "asdict", + "assoc", + "astuple", + "Attribute", + "cmp_using", + "converters", + "define", + "evolve", + "exceptions", + "Factory", + "field", + "fields_dict", + "fields", + "filters", + "frozen", + "has", + "make_class", + "mutable", + "NOTHING", + "resolve_types", + "setters", + "validate", + "validators", +] diff --git a/src/poetry/core/_vendor/attrs/converters.py b/src/poetry/core/_vendor/attrs/converters.py new file mode 100644 index 0000000..edfa8d3 --- /dev/null +++ b/src/poetry/core/_vendor/attrs/converters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.converters import * # noqa diff --git a/src/poetry/core/_vendor/attrs/exceptions.py b/src/poetry/core/_vendor/attrs/exceptions.py new file mode 100644 index 0000000..bd9efed --- /dev/null +++ b/src/poetry/core/_vendor/attrs/exceptions.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.exceptions import * # noqa diff --git a/src/poetry/core/_vendor/attrs/filters.py b/src/poetry/core/_vendor/attrs/filters.py new file mode 100644 index 0000000..5295900 --- /dev/null +++ b/src/poetry/core/_vendor/attrs/filters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.filters import * # noqa diff --git a/src/poetry/core/_vendor/attrs/py.typed b/src/poetry/core/_vendor/attrs/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/_vendor/attrs/setters.py b/src/poetry/core/_vendor/attrs/setters.py new file mode 100644 index 0000000..9b50770 --- /dev/null +++ b/src/poetry/core/_vendor/attrs/setters.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.setters import * # noqa diff --git a/src/poetry/core/_vendor/attrs/validators.py b/src/poetry/core/_vendor/attrs/validators.py new file mode 100644 index 0000000..ab2c9b3 --- /dev/null +++ b/src/poetry/core/_vendor/attrs/validators.py @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: MIT + +from attr.validators import * # noqa diff --git a/src/poetry/core/_vendor/jsonschema/COPYING b/src/poetry/core/_vendor/jsonschema/COPYING new file mode 100644 index 0000000..af9cfbd --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/COPYING @@ -0,0 +1,19 @@ +Copyright (c) 2013 Julian Berman + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
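Editorial aside: the validator helpers vendored above (instance_of, matches_re, in_, optional, ge, deep_iterable, and the disabled context manager) are designed to be attached to attrs attributes. The sketch below is illustrative only and is not part of the patch; the User class, its field names, and the sample values are invented for the example, and a plain `import attr` is assumed rather than the vendored import path.

# Illustrative sketch (assumed names): wiring the vendored attr.validators
# onto an attrs class. "User" and its fields are invented for this example.
import attr
from attr import validators as v


@attr.s
class User:
    name = attr.ib(validator=v.instance_of(str))
    email = attr.ib(validator=v.matches_re(r"[^@]+@[^@]+"))
    role = attr.ib(validator=v.in_({"admin", "user"}))
    age = attr.ib(default=None, validator=v.optional(v.ge(0)))
    tags = attr.ib(
        factory=list,
        validator=v.deep_iterable(
            member_validator=v.instance_of(str),
            iterable_validator=v.instance_of(list),
        ),
    )


User("alice", "alice@example.com", "admin", age=30, tags=["core"])  # passes
# User("bob", "not-an-email", "user") would raise ValueError from matches_re.

# Validation can also be switched off globally; disabled() is the
# context-manager form of set_disabled() and, as its docstring warns,
# is not thread-safe.
with v.disabled():
    User(123, "anything", "nobody")  # no validators run inside this block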
diff --git a/src/poetry/core/_vendor/jsonschema/__init__.py b/src/poetry/core/_vendor/jsonschema/__init__.py new file mode 100644 index 0000000..6628fc7 --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/__init__.py @@ -0,0 +1,71 @@ +""" +An implementation of JSON Schema for Python + +The main functionality is provided by the validator classes for each of the +supported JSON Schema versions. + +Most commonly, `jsonschema.validators.validate` is the quickest way to simply +validate a given instance under a schema, and will create a validator +for you. +""" +import warnings + +from jsonschema._format import FormatChecker +from jsonschema._types import TypeChecker +from jsonschema.exceptions import ( + ErrorTree, + FormatError, + RefResolutionError, + SchemaError, + ValidationError, +) +from jsonschema.protocols import Validator +from jsonschema.validators import ( + Draft3Validator, + Draft4Validator, + Draft6Validator, + Draft7Validator, + Draft201909Validator, + Draft202012Validator, + RefResolver, + validate, +) + + +def __getattr__(name): + if name == "__version__": + warnings.warn( + "Accessing jsonschema.__version__ is deprecated and will be " + "removed in a future release. Use importlib.metadata directly " + "to query for jsonschema's version.", + DeprecationWarning, + stacklevel=2, + ) + + try: + from importlib import metadata + except ImportError: + import importlib_metadata as metadata + + return metadata.version("jsonschema") + + format_checkers = { + "draft3_format_checker": Draft3Validator, + "draft4_format_checker": Draft4Validator, + "draft6_format_checker": Draft6Validator, + "draft7_format_checker": Draft7Validator, + "draft201909_format_checker": Draft201909Validator, + "draft202012_format_checker": Draft202012Validator, + } + ValidatorForFormat = format_checkers.get(name) + if ValidatorForFormat is not None: + warnings.warn( + f"Accessing jsonschema.{name} is deprecated and will be " + "removed in a future release. Instead, use the FORMAT_CHECKER " + "attribute on the corresponding Validator.", + DeprecationWarning, + stacklevel=2, + ) + return ValidatorForFormat.FORMAT_CHECKER + + raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/src/poetry/core/_vendor/jsonschema/__main__.py b/src/poetry/core/_vendor/jsonschema/__main__.py new file mode 100644 index 0000000..fdc21e2 --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/__main__.py @@ -0,0 +1,3 @@ +from jsonschema.cli import main + +main() diff --git a/src/poetry/core/_vendor/jsonschema/_format.py b/src/poetry/core/_vendor/jsonschema/_format.py new file mode 100644 index 0000000..6a25461 --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/_format.py @@ -0,0 +1,513 @@ +from __future__ import annotations + +from contextlib import suppress +from uuid import UUID +import datetime +import ipaddress +import re +import typing +import warnings + +from jsonschema.exceptions import FormatError + +_FormatCheckCallable = typing.Callable[[object], bool] +_F = typing.TypeVar("_F", bound=_FormatCheckCallable) +_RaisesType = typing.Union[ + typing.Type[Exception], typing.Tuple[typing.Type[Exception], ...], +] + + +class FormatChecker: + """ + A ``format`` property checker. + + JSON Schema does not mandate that the ``format`` property actually do any + validation. If validation is desired however, instances of this class can + be hooked into validators to enable format validation. + + `FormatChecker` objects always return ``True`` when asked about + formats that they do not know how to validate. 
+ + To add a check for a custom format use the `FormatChecker.checks` + decorator. + + Arguments: + + formats: + + The known formats to validate. This argument can be used to + limit which formats will be used during validation. + """ + + checkers: dict[ + str, + tuple[_FormatCheckCallable, _RaisesType], + ] = {} + + def __init__(self, formats: typing.Iterable[str] = None): + if formats is None: + formats = self.checkers.keys() + self.checkers = {k: self.checkers[k] for k in formats} + + def __repr__(self): + return "".format(sorted(self.checkers)) + + def checks( + self, format: str, raises: _RaisesType = (), + ) -> typing.Callable[[_F], _F]: + """ + Register a decorated function as validating a new format. + + Arguments: + + format: + + The format that the decorated function will check. + + raises: + + The exception(s) raised by the decorated function when an + invalid instance is found. + + The exception object will be accessible as the + `jsonschema.exceptions.ValidationError.cause` attribute of the + resulting validation error. + """ + + def _checks(func: _F) -> _F: + self.checkers[format] = (func, raises) + return func + + return _checks + + @classmethod + def cls_checks( + cls, format: str, raises: _RaisesType = (), + ) -> typing.Callable[[_F], _F]: + warnings.warn( + ( + "FormatChecker.cls_checks is deprecated. Call " + "FormatChecker.checks on a specific FormatChecker instance " + "instead." + ), + DeprecationWarning, + stacklevel=2, + ) + return cls._cls_checks(format=format, raises=raises) + + @classmethod + def _cls_checks( + cls, format: str, raises: _RaisesType = (), + ) -> typing.Callable[[_F], _F]: + def _checks(func: _F) -> _F: + cls.checkers[format] = (func, raises) + return func + + return _checks + + def check(self, instance: object, format: str) -> None: + """ + Check whether the instance conforms to the given format. + + Arguments: + + instance (*any primitive type*, i.e. str, number, bool): + + The instance to check + + format: + + The format that instance should conform to + + Raises: + + FormatError: + + if the instance does not conform to ``format`` + """ + + if format not in self.checkers: + return + + func, raises = self.checkers[format] + result, cause = None, None + try: + result = func(instance) + except raises as e: + cause = e + if not result: + raise FormatError(f"{instance!r} is not a {format!r}", cause=cause) + + def conforms(self, instance: object, format: str) -> bool: + """ + Check whether the instance conforms to the given format. + + Arguments: + + instance (*any primitive type*, i.e. 
str, number, bool): + + The instance to check + + format: + + The format that instance should conform to + + Returns: + + bool: whether it conformed + """ + + try: + self.check(instance, format) + except FormatError: + return False + else: + return True + + +draft3_format_checker = FormatChecker() +draft4_format_checker = FormatChecker() +draft6_format_checker = FormatChecker() +draft7_format_checker = FormatChecker() +draft201909_format_checker = FormatChecker() +draft202012_format_checker = FormatChecker() + +_draft_checkers: dict[str, FormatChecker] = dict( + draft3=draft3_format_checker, + draft4=draft4_format_checker, + draft6=draft6_format_checker, + draft7=draft7_format_checker, + draft201909=draft201909_format_checker, + draft202012=draft202012_format_checker, +) + + +def _checks_drafts( + name=None, + draft3=None, + draft4=None, + draft6=None, + draft7=None, + draft201909=None, + draft202012=None, + raises=(), +) -> typing.Callable[[_F], _F]: + draft3 = draft3 or name + draft4 = draft4 or name + draft6 = draft6 or name + draft7 = draft7 or name + draft201909 = draft201909 or name + draft202012 = draft202012 or name + + def wrap(func: _F) -> _F: + if draft3: + func = _draft_checkers["draft3"].checks(draft3, raises)(func) + if draft4: + func = _draft_checkers["draft4"].checks(draft4, raises)(func) + if draft6: + func = _draft_checkers["draft6"].checks(draft6, raises)(func) + if draft7: + func = _draft_checkers["draft7"].checks(draft7, raises)(func) + if draft201909: + func = _draft_checkers["draft201909"].checks(draft201909, raises)( + func, + ) + if draft202012: + func = _draft_checkers["draft202012"].checks(draft202012, raises)( + func, + ) + + # Oy. This is bad global state, but relied upon for now, until + # deprecation. See #519 and test_format_checkers_come_with_defaults + FormatChecker._cls_checks( + draft202012 or draft201909 or draft7 or draft6 or draft4 or draft3, + raises, + )(func) + return func + + return wrap + + +@_checks_drafts(name="idn-email") +@_checks_drafts(name="email") +def is_email(instance: object) -> bool: + if not isinstance(instance, str): + return True + return "@" in instance + + +@_checks_drafts( + draft3="ip-address", + draft4="ipv4", + draft6="ipv4", + draft7="ipv4", + draft201909="ipv4", + draft202012="ipv4", + raises=ipaddress.AddressValueError, +) +def is_ipv4(instance: object) -> bool: + if not isinstance(instance, str): + return True + return bool(ipaddress.IPv4Address(instance)) + + +@_checks_drafts(name="ipv6", raises=ipaddress.AddressValueError) +def is_ipv6(instance: object) -> bool: + if not isinstance(instance, str): + return True + address = ipaddress.IPv6Address(instance) + return not getattr(address, "scope_id", "") + + +with suppress(ImportError): + from fqdn import FQDN + + @_checks_drafts( + draft3="host-name", + draft4="hostname", + draft6="hostname", + draft7="hostname", + draft201909="hostname", + draft202012="hostname", + ) + def is_host_name(instance: object) -> bool: + if not isinstance(instance, str): + return True + return FQDN(instance).is_valid + + +with suppress(ImportError): + # The built-in `idna` codec only implements RFC 3890, so we go elsewhere. 
+ import idna + + @_checks_drafts( + draft7="idn-hostname", + draft201909="idn-hostname", + draft202012="idn-hostname", + raises=(idna.IDNAError, UnicodeError), + ) + def is_idn_host_name(instance: object) -> bool: + if not isinstance(instance, str): + return True + idna.encode(instance) + return True + + +try: + import rfc3987 +except ImportError: + with suppress(ImportError): + from rfc3986_validator import validate_rfc3986 + + @_checks_drafts(name="uri") + def is_uri(instance: object) -> bool: + if not isinstance(instance, str): + return True + return validate_rfc3986(instance, rule="URI") + + @_checks_drafts( + draft6="uri-reference", + draft7="uri-reference", + draft201909="uri-reference", + draft202012="uri-reference", + raises=ValueError, + ) + def is_uri_reference(instance: object) -> bool: + if not isinstance(instance, str): + return True + return validate_rfc3986(instance, rule="URI_reference") + +else: + + @_checks_drafts( + draft7="iri", + draft201909="iri", + draft202012="iri", + raises=ValueError, + ) + def is_iri(instance: object) -> bool: + if not isinstance(instance, str): + return True + return rfc3987.parse(instance, rule="IRI") + + @_checks_drafts( + draft7="iri-reference", + draft201909="iri-reference", + draft202012="iri-reference", + raises=ValueError, + ) + def is_iri_reference(instance: object) -> bool: + if not isinstance(instance, str): + return True + return rfc3987.parse(instance, rule="IRI_reference") + + @_checks_drafts(name="uri", raises=ValueError) + def is_uri(instance: object) -> bool: + if not isinstance(instance, str): + return True + return rfc3987.parse(instance, rule="URI") + + @_checks_drafts( + draft6="uri-reference", + draft7="uri-reference", + draft201909="uri-reference", + draft202012="uri-reference", + raises=ValueError, + ) + def is_uri_reference(instance: object) -> bool: + if not isinstance(instance, str): + return True + return rfc3987.parse(instance, rule="URI_reference") + + +with suppress(ImportError): + from rfc3339_validator import validate_rfc3339 + + @_checks_drafts(name="date-time") + def is_datetime(instance: object) -> bool: + if not isinstance(instance, str): + return True + return validate_rfc3339(instance.upper()) + + @_checks_drafts( + draft7="time", + draft201909="time", + draft202012="time", + ) + def is_time(instance: object) -> bool: + if not isinstance(instance, str): + return True + return is_datetime("1970-01-01T" + instance) + + +@_checks_drafts(name="regex", raises=re.error) +def is_regex(instance: object) -> bool: + if not isinstance(instance, str): + return True + return bool(re.compile(instance)) + + +@_checks_drafts( + draft3="date", + draft7="date", + draft201909="date", + draft202012="date", + raises=ValueError, +) +def is_date(instance: object) -> bool: + if not isinstance(instance, str): + return True + return bool(instance.isascii() and datetime.date.fromisoformat(instance)) + + +@_checks_drafts(draft3="time", raises=ValueError) +def is_draft3_time(instance: object) -> bool: + if not isinstance(instance, str): + return True + return bool(datetime.datetime.strptime(instance, "%H:%M:%S")) + + +with suppress(ImportError): + from webcolors import CSS21_NAMES_TO_HEX + import webcolors + + def is_css_color_code(instance: object) -> bool: + return webcolors.normalize_hex(instance) + + @_checks_drafts(draft3="color", raises=(ValueError, TypeError)) + def is_css21_color(instance: object) -> bool: + if ( + not isinstance(instance, str) + or instance.lower() in CSS21_NAMES_TO_HEX + ): + return True + return 
is_css_color_code(instance) + + +with suppress(ImportError): + import jsonpointer + + @_checks_drafts( + draft6="json-pointer", + draft7="json-pointer", + draft201909="json-pointer", + draft202012="json-pointer", + raises=jsonpointer.JsonPointerException, + ) + def is_json_pointer(instance: object) -> bool: + if not isinstance(instance, str): + return True + return bool(jsonpointer.JsonPointer(instance)) + + # TODO: I don't want to maintain this, so it + # needs to go either into jsonpointer (pending + # https://github.com/stefankoegl/python-json-pointer/issues/34) or + # into a new external library. + @_checks_drafts( + draft7="relative-json-pointer", + draft201909="relative-json-pointer", + draft202012="relative-json-pointer", + raises=jsonpointer.JsonPointerException, + ) + def is_relative_json_pointer(instance: object) -> bool: + # Definition taken from: + # https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3 + if not isinstance(instance, str): + return True + non_negative_integer, rest = [], "" + for i, character in enumerate(instance): + if character.isdigit(): + # digits with a leading "0" are not allowed + if i > 0 and int(instance[i - 1]) == 0: + return False + + non_negative_integer.append(character) + continue + + if not non_negative_integer: + return False + + rest = instance[i:] + break + return (rest == "#") or bool(jsonpointer.JsonPointer(rest)) + + +with suppress(ImportError): + import uri_template + + @_checks_drafts( + draft6="uri-template", + draft7="uri-template", + draft201909="uri-template", + draft202012="uri-template", + ) + def is_uri_template(instance: object) -> bool: + if not isinstance(instance, str): + return True + return uri_template.validate(instance) + + +with suppress(ImportError): + import isoduration + + @_checks_drafts( + draft201909="duration", + draft202012="duration", + raises=isoduration.DurationParsingException, + ) + def is_duration(instance: object) -> bool: + if not isinstance(instance, str): + return True + return bool(isoduration.parse_duration(instance)) + + +@_checks_drafts( + draft201909="uuid", + draft202012="uuid", + raises=ValueError, +) +def is_uuid(instance: object) -> bool: + if not isinstance(instance, str): + return True + UUID(instance) + return all(instance[position] == "-" for position in (8, 13, 18, 23)) diff --git a/src/poetry/core/_vendor/jsonschema/_legacy_validators.py b/src/poetry/core/_vendor/jsonschema/_legacy_validators.py new file mode 100644 index 0000000..cc5e3f4 --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/_legacy_validators.py @@ -0,0 +1,319 @@ +from jsonschema import _utils +from jsonschema.exceptions import ValidationError + + +def id_of_ignore_ref(property="$id"): + def id_of(schema): + """ + Ignore an ``$id`` sibling of ``$ref`` if it is present. + + Otherwise, return the ID of the given schema. + """ + if schema is True or schema is False or "$ref" in schema: + return "" + return schema.get(property, "") + return id_of + + +def ignore_ref_siblings(schema): + """ + Ignore siblings of ``$ref`` if it is present. + + Otherwise, return all keywords. + + Suitable for use with `create`'s ``applicable_validators`` argument. 
+ """ + ref = schema.get("$ref") + if ref is not None: + return [("$ref", ref)] + else: + return schema.items() + + +def dependencies_draft3(validator, dependencies, instance, schema): + if not validator.is_type(instance, "object"): + return + + for property, dependency in dependencies.items(): + if property not in instance: + continue + + if validator.is_type(dependency, "object"): + yield from validator.descend( + instance, dependency, schema_path=property, + ) + elif validator.is_type(dependency, "string"): + if dependency not in instance: + message = f"{dependency!r} is a dependency of {property!r}" + yield ValidationError(message) + else: + for each in dependency: + if each not in instance: + message = f"{each!r} is a dependency of {property!r}" + yield ValidationError(message) + + +def dependencies_draft4_draft6_draft7( + validator, + dependencies, + instance, + schema, +): + """ + Support for the ``dependencies`` keyword from pre-draft 2019-09. + + In later drafts, the keyword was split into separate + ``dependentRequired`` and ``dependentSchemas`` validators. + """ + if not validator.is_type(instance, "object"): + return + + for property, dependency in dependencies.items(): + if property not in instance: + continue + + if validator.is_type(dependency, "array"): + for each in dependency: + if each not in instance: + message = f"{each!r} is a dependency of {property!r}" + yield ValidationError(message) + else: + yield from validator.descend( + instance, dependency, schema_path=property, + ) + + +def disallow_draft3(validator, disallow, instance, schema): + for disallowed in _utils.ensure_list(disallow): + if validator.evolve(schema={"type": [disallowed]}).is_valid(instance): + message = f"{disallowed!r} is disallowed for {instance!r}" + yield ValidationError(message) + + +def extends_draft3(validator, extends, instance, schema): + if validator.is_type(extends, "object"): + yield from validator.descend(instance, extends) + return + for index, subschema in enumerate(extends): + yield from validator.descend(instance, subschema, schema_path=index) + + +def items_draft3_draft4(validator, items, instance, schema): + if not validator.is_type(instance, "array"): + return + + if validator.is_type(items, "object"): + for index, item in enumerate(instance): + yield from validator.descend(item, items, path=index) + else: + for (index, item), subschema in zip(enumerate(instance), items): + yield from validator.descend( + item, subschema, path=index, schema_path=index, + ) + + +def items_draft6_draft7_draft201909(validator, items, instance, schema): + if not validator.is_type(instance, "array"): + return + + if validator.is_type(items, "array"): + for (index, item), subschema in zip(enumerate(instance), items): + yield from validator.descend( + item, subschema, path=index, schema_path=index, + ) + else: + for index, item in enumerate(instance): + yield from validator.descend(item, items, path=index) + + +def minimum_draft3_draft4(validator, minimum, instance, schema): + if not validator.is_type(instance, "number"): + return + + if schema.get("exclusiveMinimum", False): + failed = instance <= minimum + cmp = "less than or equal to" + else: + failed = instance < minimum + cmp = "less than" + + if failed: + message = f"{instance!r} is {cmp} the minimum of {minimum!r}" + yield ValidationError(message) + + +def maximum_draft3_draft4(validator, maximum, instance, schema): + if not validator.is_type(instance, "number"): + return + + if schema.get("exclusiveMaximum", False): + failed = instance >= maximum 
+ cmp = "greater than or equal to" + else: + failed = instance > maximum + cmp = "greater than" + + if failed: + message = f"{instance!r} is {cmp} the maximum of {maximum!r}" + yield ValidationError(message) + + +def properties_draft3(validator, properties, instance, schema): + if not validator.is_type(instance, "object"): + return + + for property, subschema in properties.items(): + if property in instance: + yield from validator.descend( + instance[property], + subschema, + path=property, + schema_path=property, + ) + elif subschema.get("required", False): + error = ValidationError(f"{property!r} is a required property") + error._set( + validator="required", + validator_value=subschema["required"], + instance=instance, + schema=schema, + ) + error.path.appendleft(property) + error.schema_path.extend([property, "required"]) + yield error + + +def type_draft3(validator, types, instance, schema): + types = _utils.ensure_list(types) + + all_errors = [] + for index, type in enumerate(types): + if validator.is_type(type, "object"): + errors = list(validator.descend(instance, type, schema_path=index)) + if not errors: + return + all_errors.extend(errors) + else: + if validator.is_type(instance, type): + return + else: + reprs = [] + for type in types: + try: + reprs.append(repr(type["name"])) + except Exception: + reprs.append(repr(type)) + yield ValidationError( + f"{instance!r} is not of type {', '.join(reprs)}", + context=all_errors, + ) + + +def contains_draft6_draft7(validator, contains, instance, schema): + if not validator.is_type(instance, "array"): + return + + if not any( + validator.evolve(schema=contains).is_valid(element) + for element in instance + ): + yield ValidationError( + f"None of {instance!r} are valid under the given schema", + ) + + +def recursiveRef(validator, recursiveRef, instance, schema): + lookup_url, target = validator.resolver.resolution_scope, validator.schema + + for each in reversed(validator.resolver._scopes_stack[1:]): + lookup_url, next_target = validator.resolver.resolve(each) + if next_target.get("$recursiveAnchor"): + target = next_target + else: + break + + fragment = recursiveRef.lstrip("#") + subschema = validator.resolver.resolve_fragment(target, fragment) + # FIXME: This is gutted (and not calling .descend) because it can trigger + # recursion errors, so there's a bug here. Re-enable the tests to + # see it. 
+ subschema + return [] + + +def find_evaluated_item_indexes_by_schema(validator, instance, schema): + """ + Get all indexes of items that get evaluated under the current schema + + Covers all keywords related to unevaluatedItems: items, prefixItems, if, + then, else, contains, unevaluatedItems, allOf, oneOf, anyOf + """ + if validator.is_type(schema, "boolean"): + return [] + evaluated_indexes = [] + + if "additionalItems" in schema: + return list(range(0, len(instance))) + + if "$ref" in schema: + scope, resolved = validator.resolver.resolve(schema["$ref"]) + validator.resolver.push_scope(scope) + + try: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, resolved, + ) + finally: + validator.resolver.pop_scope() + + if "items" in schema: + if validator.is_type(schema["items"], "object"): + return list(range(0, len(instance))) + evaluated_indexes += list(range(0, len(schema["items"]))) + + if "if" in schema: + if validator.evolve(schema=schema["if"]).is_valid(instance): + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, schema["if"], + ) + if "then" in schema: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, schema["then"], + ) + else: + if "else" in schema: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, schema["else"], + ) + + for keyword in ["contains", "unevaluatedItems"]: + if keyword in schema: + for k, v in enumerate(instance): + if validator.evolve(schema=schema[keyword]).is_valid(v): + evaluated_indexes.append(k) + + for keyword in ["allOf", "oneOf", "anyOf"]: + if keyword in schema: + for subschema in schema[keyword]: + errs = list(validator.descend(instance, subschema)) + if not errs: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, subschema, + ) + + return evaluated_indexes + + +def unevaluatedItems_draft2019(validator, unevaluatedItems, instance, schema): + if not validator.is_type(instance, "array"): + return + evaluated_item_indexes = find_evaluated_item_indexes_by_schema( + validator, instance, schema, + ) + unevaluated_items = [ + item for index, item in enumerate(instance) + if index not in evaluated_item_indexes + ] + if unevaluated_items: + error = "Unevaluated items are not allowed (%s %s unexpected)" + yield ValidationError(error % _utils.extras_msg(unevaluated_items)) diff --git a/src/poetry/core/_vendor/jsonschema/_types.py b/src/poetry/core/_vendor/jsonschema/_types.py new file mode 100644 index 0000000..5b543f7 --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/_types.py @@ -0,0 +1,203 @@ +from __future__ import annotations + +import numbers +import typing + +from pyrsistent import pmap +from pyrsistent.typing import PMap +import attr + +from jsonschema.exceptions import UndefinedTypeCheck + + +# unfortunately, the type of pmap is generic, and if used as the attr.ib +# converter, the generic type is presented to mypy, which then fails to match +# the concrete type of a type checker mapping +# this "do nothing" wrapper presents the correct information to mypy +def _typed_pmap_converter( + init_val: typing.Mapping[ + str, + typing.Callable[["TypeChecker", typing.Any], bool], + ], +) -> PMap[str, typing.Callable[["TypeChecker", typing.Any], bool]]: + return pmap(init_val) + + +def is_array(checker, instance): + return isinstance(instance, list) + + +def is_bool(checker, instance): + return isinstance(instance, bool) + + +def is_integer(checker, instance): + # bool inherits from int, so 
ensure bools aren't reported as ints + if isinstance(instance, bool): + return False + return isinstance(instance, int) + + +def is_null(checker, instance): + return instance is None + + +def is_number(checker, instance): + # bool inherits from int, so ensure bools aren't reported as ints + if isinstance(instance, bool): + return False + return isinstance(instance, numbers.Number) + + +def is_object(checker, instance): + return isinstance(instance, dict) + + +def is_string(checker, instance): + return isinstance(instance, str) + + +def is_any(checker, instance): + return True + + +@attr.s(frozen=True, repr=False) +class TypeChecker: + """ + A :kw:`type` property checker. + + A `TypeChecker` performs type checking for a `Validator`, converting + between the defined JSON Schema types and some associated Python types or + objects. + + Modifying the behavior just mentioned by redefining which Python objects + are considered to be of which JSON Schema types can be done using + `TypeChecker.redefine` or `TypeChecker.redefine_many`, and types can be + removed via `TypeChecker.remove`. Each of these return a new `TypeChecker`. + + Arguments: + + type_checkers: + + The initial mapping of types to their checking functions. + """ + + _type_checkers: PMap[ + str, typing.Callable[["TypeChecker", typing.Any], bool], + ] = attr.ib( + default=pmap(), + converter=_typed_pmap_converter, + ) + + def __repr__(self): + types = ", ".join(repr(k) for k in sorted(self._type_checkers)) + return f"<{self.__class__.__name__} types={{{types}}}>" + + def is_type(self, instance, type: str) -> bool: + """ + Check if the instance is of the appropriate type. + + Arguments: + + instance: + + The instance to check + + type: + + The name of the type that is expected. + + Raises: + + `jsonschema.exceptions.UndefinedTypeCheck`: + + if ``type`` is unknown to this object. + """ + try: + fn = self._type_checkers[type] + except KeyError: + raise UndefinedTypeCheck(type) from None + + return fn(self, instance) + + def redefine(self, type: str, fn) -> "TypeChecker": + """ + Produce a new checker with the given type redefined. + + Arguments: + + type: + + The name of the type to check. + + fn (collections.abc.Callable): + + A callable taking exactly two parameters - the type + checker calling the function and the instance to check. + The function should return true if instance is of this + type and false otherwise. + """ + return self.redefine_many({type: fn}) + + def redefine_many(self, definitions=()) -> "TypeChecker": + """ + Produce a new checker with the given types redefined. + + Arguments: + + definitions (dict): + + A dictionary mapping types to their checking functions. + """ + type_checkers = self._type_checkers.update(definitions) + return attr.evolve(self, type_checkers=type_checkers) + + def remove(self, *types) -> "TypeChecker": + """ + Produce a new checker with the given types forgotten. + + Arguments: + + types: + + the names of the types to remove. 
+ + Raises: + + `jsonschema.exceptions.UndefinedTypeCheck`: + + if any given type is unknown to this object + """ + + type_checkers = self._type_checkers + for each in types: + try: + type_checkers = type_checkers.remove(each) + except KeyError: + raise UndefinedTypeCheck(each) + return attr.evolve(self, type_checkers=type_checkers) + + +draft3_type_checker = TypeChecker( + { + "any": is_any, + "array": is_array, + "boolean": is_bool, + "integer": is_integer, + "object": is_object, + "null": is_null, + "number": is_number, + "string": is_string, + }, +) +draft4_type_checker = draft3_type_checker.remove("any") +draft6_type_checker = draft4_type_checker.redefine( + "integer", + lambda checker, instance: ( + is_integer(checker, instance) + or isinstance(instance, float) and instance.is_integer() + ), +) +draft7_type_checker = draft6_type_checker +draft201909_type_checker = draft7_type_checker +draft202012_type_checker = draft201909_type_checker diff --git a/src/poetry/core/_vendor/jsonschema/_utils.py b/src/poetry/core/_vendor/jsonschema/_utils.py new file mode 100644 index 0000000..a31ab43 --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/_utils.py @@ -0,0 +1,345 @@ +from collections.abc import Mapping, MutableMapping, Sequence +from urllib.parse import urlsplit +import itertools +import json +import os +import re + +class URIDict(MutableMapping): + """ + Dictionary which uses normalized URIs as keys. + """ + + def normalize(self, uri): + return urlsplit(uri).geturl() + + def __init__(self, *args, **kwargs): + self.store = dict() + self.store.update(*args, **kwargs) + + def __getitem__(self, uri): + return self.store[self.normalize(uri)] + + def __setitem__(self, uri, value): + self.store[self.normalize(uri)] = value + + def __delitem__(self, uri): + del self.store[self.normalize(uri)] + + def __iter__(self): + return iter(self.store) + + def __len__(self): + return len(self.store) + + def __repr__(self): + return repr(self.store) + + +class Unset: + """ + An as-of-yet unset attribute or unprovided default parameter. + """ + + def __repr__(self): + return "" + + +def load_schema(name): + """ + Load a schema from ./schemas/``name``.json and return it. + """ + with open( + os.path.join(os.path.dirname(__file__), "schemas", "{0}.json".format(name)), + encoding="utf-8" + ) as f: + data = f.read() + + return json.loads(data) + + +def format_as_index(container, indices): + """ + Construct a single string containing indexing operations for the indices. + + For example for a container ``bar``, [1, 2, "foo"] -> bar[1][2]["foo"] + + Arguments: + + container (str): + + A word to use for the thing being indexed + + indices (sequence): + + The indices to format. + """ + + if not indices: + return container + return f"{container}[{']['.join(repr(index) for index in indices)}]" + + +def find_additional_properties(instance, schema): + """ + Return the set of additional properties for the given ``instance``. + + Weeds out properties that should have been validated by ``properties`` and + / or ``patternProperties``. + + Assumes ``instance`` is dict-like already. + """ + + properties = schema.get("properties", {}) + patterns = "|".join(schema.get("patternProperties", {})) + for property in instance: + if property not in properties: + if patterns and re.search(patterns, property): + continue + yield property + + +def extras_msg(extras): + """ + Create an error message for extra items or properties. 
+ """ + + if len(extras) == 1: + verb = "was" + else: + verb = "were" + return ", ".join(repr(extra) for extra in sorted(extras)), verb + + +def ensure_list(thing): + """ + Wrap ``thing`` in a list if it's a single str. + + Otherwise, return it unchanged. + """ + + if isinstance(thing, str): + return [thing] + return thing + + +def _mapping_equal(one, two): + """ + Check if two mappings are equal using the semantics of `equal`. + """ + if len(one) != len(two): + return False + return all( + key in two and equal(value, two[key]) + for key, value in one.items() + ) + + +def _sequence_equal(one, two): + """ + Check if two sequences are equal using the semantics of `equal`. + """ + if len(one) != len(two): + return False + return all(equal(i, j) for i, j in zip(one, two)) + + +def equal(one, two): + """ + Check if two things are equal evading some Python type hierarchy semantics. + + Specifically in JSON Schema, evade `bool` inheriting from `int`, + recursing into sequences to do the same. + """ + if isinstance(one, str) or isinstance(two, str): + return one == two + if isinstance(one, Sequence) and isinstance(two, Sequence): + return _sequence_equal(one, two) + if isinstance(one, Mapping) and isinstance(two, Mapping): + return _mapping_equal(one, two) + return unbool(one) == unbool(two) + + +def unbool(element, true=object(), false=object()): + """ + A hack to make True and 1 and False and 0 unique for ``uniq``. + """ + + if element is True: + return true + elif element is False: + return false + return element + + +def uniq(container): + """ + Check if all of a container's elements are unique. + + Tries to rely on the container being recursively sortable, or otherwise + falls back on (slow) brute force. + """ + try: + sort = sorted(unbool(i) for i in container) + sliced = itertools.islice(sort, 1, None) + + for i, j in zip(sort, sliced): + if equal(i, j): + return False + + except (NotImplementedError, TypeError): + seen = [] + for e in container: + e = unbool(e) + + for i in seen: + if equal(i, e): + return False + + seen.append(e) + return True + + +def find_evaluated_item_indexes_by_schema(validator, instance, schema): + """ + Get all indexes of items that get evaluated under the current schema + + Covers all keywords related to unevaluatedItems: items, prefixItems, if, + then, else, contains, unevaluatedItems, allOf, oneOf, anyOf + """ + if validator.is_type(schema, "boolean"): + return [] + evaluated_indexes = [] + + if "items" in schema: + return list(range(0, len(instance))) + + if "$ref" in schema: + scope, resolved = validator.resolver.resolve(schema["$ref"]) + validator.resolver.push_scope(scope) + + try: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, resolved, + ) + finally: + validator.resolver.pop_scope() + + if "prefixItems" in schema: + evaluated_indexes += list(range(0, len(schema["prefixItems"]))) + + if "if" in schema: + if validator.evolve(schema=schema["if"]).is_valid(instance): + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, schema["if"], + ) + if "then" in schema: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, schema["then"], + ) + else: + if "else" in schema: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, schema["else"], + ) + + for keyword in ["contains", "unevaluatedItems"]: + if keyword in schema: + for k, v in enumerate(instance): + if validator.evolve(schema=schema[keyword]).is_valid(v): + 
evaluated_indexes.append(k) + + for keyword in ["allOf", "oneOf", "anyOf"]: + if keyword in schema: + for subschema in schema[keyword]: + errs = list(validator.descend(instance, subschema)) + if not errs: + evaluated_indexes += find_evaluated_item_indexes_by_schema( + validator, instance, subschema, + ) + + return evaluated_indexes + + +def find_evaluated_property_keys_by_schema(validator, instance, schema): + """ + Get all keys of items that get evaluated under the current schema + + Covers all keywords related to unevaluatedProperties: properties, + additionalProperties, unevaluatedProperties, patternProperties, + dependentSchemas, allOf, oneOf, anyOf, if, then, else + """ + if validator.is_type(schema, "boolean"): + return [] + evaluated_keys = [] + + if "$ref" in schema: + scope, resolved = validator.resolver.resolve(schema["$ref"]) + validator.resolver.push_scope(scope) + + try: + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, resolved, + ) + finally: + validator.resolver.pop_scope() + + for keyword in [ + "properties", "additionalProperties", "unevaluatedProperties", + ]: + if keyword in schema: + if validator.is_type(schema[keyword], "boolean"): + for property, value in instance.items(): + if validator.evolve(schema=schema[keyword]).is_valid( + {property: value}, + ): + evaluated_keys.append(property) + + if validator.is_type(schema[keyword], "object"): + for property, subschema in schema[keyword].items(): + if property in instance and validator.evolve( + schema=subschema, + ).is_valid(instance[property]): + evaluated_keys.append(property) + + if "patternProperties" in schema: + for property, value in instance.items(): + for pattern, _ in schema["patternProperties"].items(): + if re.search(pattern, property) and validator.evolve( + schema=schema["patternProperties"], + ).is_valid({property: value}): + evaluated_keys.append(property) + + if "dependentSchemas" in schema: + for property, subschema in schema["dependentSchemas"].items(): + if property not in instance: + continue + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, subschema, + ) + + for keyword in ["allOf", "oneOf", "anyOf"]: + if keyword in schema: + for subschema in schema[keyword]: + errs = list(validator.descend(instance, subschema)) + if not errs: + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, subschema, + ) + + if "if" in schema: + if validator.evolve(schema=schema["if"]).is_valid(instance): + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, schema["if"], + ) + if "then" in schema: + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, schema["then"], + ) + else: + if "else" in schema: + evaluated_keys += find_evaluated_property_keys_by_schema( + validator, instance, schema["else"], + ) + + return evaluated_keys diff --git a/src/poetry/core/_vendor/jsonschema/_validators.py b/src/poetry/core/_vendor/jsonschema/_validators.py new file mode 100644 index 0000000..874e879 --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/_validators.py @@ -0,0 +1,467 @@ +from fractions import Fraction +from urllib.parse import urldefrag, urljoin +import re + +from jsonschema._utils import ( + ensure_list, + equal, + extras_msg, + find_additional_properties, + find_evaluated_item_indexes_by_schema, + find_evaluated_property_keys_by_schema, + unbool, + uniq, +) +from jsonschema.exceptions import FormatError, ValidationError + + +def patternProperties(validator, 
patternProperties, instance, schema): + if not validator.is_type(instance, "object"): + return + + for pattern, subschema in patternProperties.items(): + for k, v in instance.items(): + if re.search(pattern, k): + yield from validator.descend( + v, subschema, path=k, schema_path=pattern, + ) + + +def propertyNames(validator, propertyNames, instance, schema): + if not validator.is_type(instance, "object"): + return + + for property in instance: + yield from validator.descend(instance=property, schema=propertyNames) + + +def additionalProperties(validator, aP, instance, schema): + if not validator.is_type(instance, "object"): + return + + extras = set(find_additional_properties(instance, schema)) + + if validator.is_type(aP, "object"): + for extra in extras: + yield from validator.descend(instance[extra], aP, path=extra) + elif not aP and extras: + if "patternProperties" in schema: + if len(extras) == 1: + verb = "does" + else: + verb = "do" + + joined = ", ".join(repr(each) for each in sorted(extras)) + patterns = ", ".join( + repr(each) for each in sorted(schema["patternProperties"]) + ) + error = f"{joined} {verb} not match any of the regexes: {patterns}" + yield ValidationError(error) + else: + error = "Additional properties are not allowed (%s %s unexpected)" + yield ValidationError(error % extras_msg(extras)) + + +def items(validator, items, instance, schema): + if not validator.is_type(instance, "array"): + return + + prefix = len(schema.get("prefixItems", [])) + total = len(instance) + if items is False and total > prefix: + message = f"Expected at most {prefix} items, but found {total}" + yield ValidationError(message) + else: + for index in range(prefix, total): + yield from validator.descend( + instance=instance[index], + schema=items, + path=index, + ) + + +def additionalItems(validator, aI, instance, schema): + if ( + not validator.is_type(instance, "array") + or validator.is_type(schema.get("items", {}), "object") + ): + return + + len_items = len(schema.get("items", [])) + if validator.is_type(aI, "object"): + for index, item in enumerate(instance[len_items:], start=len_items): + yield from validator.descend(item, aI, path=index) + elif not aI and len(instance) > len(schema.get("items", [])): + error = "Additional items are not allowed (%s %s unexpected)" + yield ValidationError( + error % extras_msg(instance[len(schema.get("items", [])):]), + ) + + +def const(validator, const, instance, schema): + if not equal(instance, const): + yield ValidationError(f"{const!r} was expected") + + +def contains(validator, contains, instance, schema): + if not validator.is_type(instance, "array"): + return + + matches = 0 + min_contains = schema.get("minContains", 1) + max_contains = schema.get("maxContains", len(instance)) + + for each in instance: + if validator.evolve(schema=contains).is_valid(each): + matches += 1 + if matches > max_contains: + yield ValidationError( + "Too many items match the given schema " + f"(expected at most {max_contains})", + validator="maxContains", + validator_value=max_contains, + ) + return + + if matches < min_contains: + if not matches: + yield ValidationError( + f"{instance!r} does not contain items " + "matching the given schema", + ) + else: + yield ValidationError( + "Too few items match the given schema (expected at least " + f"{min_contains} but only {matches} matched)", + validator="minContains", + validator_value=min_contains, + ) + + +def exclusiveMinimum(validator, minimum, instance, schema): + if not validator.is_type(instance, "number"): + return 
+ + if instance <= minimum: + yield ValidationError( + f"{instance!r} is less than or equal to " + f"the minimum of {minimum!r}", + ) + + +def exclusiveMaximum(validator, maximum, instance, schema): + if not validator.is_type(instance, "number"): + return + + if instance >= maximum: + yield ValidationError( + f"{instance!r} is greater than or equal " + f"to the maximum of {maximum!r}", + ) + + +def minimum(validator, minimum, instance, schema): + if not validator.is_type(instance, "number"): + return + + if instance < minimum: + message = f"{instance!r} is less than the minimum of {minimum!r}" + yield ValidationError(message) + + +def maximum(validator, maximum, instance, schema): + if not validator.is_type(instance, "number"): + return + + if instance > maximum: + message = f"{instance!r} is greater than the maximum of {maximum!r}" + yield ValidationError(message) + + +def multipleOf(validator, dB, instance, schema): + if not validator.is_type(instance, "number"): + return + + if isinstance(dB, float): + quotient = instance / dB + try: + failed = int(quotient) != quotient + except OverflowError: + # When `instance` is large and `dB` is less than one, + # quotient can overflow to infinity; and then casting to int + # raises an error. + # + # In this case we fall back to Fraction logic, which is + # exact and cannot overflow. The performance is also + # acceptable: we try the fast all-float option first, and + # we know that fraction(dB) can have at most a few hundred + # digits in each part. The worst-case slowdown is therefore + # for already-slow enormous integers or Decimals. + failed = (Fraction(instance) / Fraction(dB)).denominator != 1 + else: + failed = instance % dB + + if failed: + yield ValidationError(f"{instance!r} is not a multiple of {dB}") + + +def minItems(validator, mI, instance, schema): + if validator.is_type(instance, "array") and len(instance) < mI: + yield ValidationError(f"{instance!r} is too short") + + +def maxItems(validator, mI, instance, schema): + if validator.is_type(instance, "array") and len(instance) > mI: + yield ValidationError(f"{instance!r} is too long") + + +def uniqueItems(validator, uI, instance, schema): + if ( + uI + and validator.is_type(instance, "array") + and not uniq(instance) + ): + yield ValidationError(f"{instance!r} has non-unique elements") + + +def pattern(validator, patrn, instance, schema): + if ( + validator.is_type(instance, "string") + and not re.search(patrn, instance) + ): + yield ValidationError(f"{instance!r} does not match {patrn!r}") + + +def format(validator, format, instance, schema): + if validator.format_checker is not None: + try: + validator.format_checker.check(instance, format) + except FormatError as error: + yield ValidationError(error.message, cause=error.cause) + + +def minLength(validator, mL, instance, schema): + if validator.is_type(instance, "string") and len(instance) < mL: + yield ValidationError(f"{instance!r} is too short") + + +def maxLength(validator, mL, instance, schema): + if validator.is_type(instance, "string") and len(instance) > mL: + yield ValidationError(f"{instance!r} is too long") + + +def dependentRequired(validator, dependentRequired, instance, schema): + if not validator.is_type(instance, "object"): + return + + for property, dependency in dependentRequired.items(): + if property not in instance: + continue + + for each in dependency: + if each not in instance: + message = f"{each!r} is a dependency of {property!r}" + yield ValidationError(message) + + +def dependentSchemas(validator, 
dependentSchemas, instance, schema): + if not validator.is_type(instance, "object"): + return + + for property, dependency in dependentSchemas.items(): + if property not in instance: + continue + yield from validator.descend( + instance, dependency, schema_path=property, + ) + + +def enum(validator, enums, instance, schema): + if instance == 0 or instance == 1: + unbooled = unbool(instance) + if all(unbooled != unbool(each) for each in enums): + yield ValidationError(f"{instance!r} is not one of {enums!r}") + elif instance not in enums: + yield ValidationError(f"{instance!r} is not one of {enums!r}") + + +def ref(validator, ref, instance, schema): + resolve = getattr(validator.resolver, "resolve", None) + if resolve is None: + with validator.resolver.resolving(ref) as resolved: + yield from validator.descend(instance, resolved) + else: + scope, resolved = validator.resolver.resolve(ref) + validator.resolver.push_scope(scope) + + try: + yield from validator.descend(instance, resolved) + finally: + validator.resolver.pop_scope() + + +def dynamicRef(validator, dynamicRef, instance, schema): + _, fragment = urldefrag(dynamicRef) + + for url in validator.resolver._scopes_stack: + lookup_url = urljoin(url, dynamicRef) + with validator.resolver.resolving(lookup_url) as subschema: + if ("$dynamicAnchor" in subschema + and fragment == subschema["$dynamicAnchor"]): + yield from validator.descend(instance, subschema) + break + else: + with validator.resolver.resolving(dynamicRef) as subschema: + yield from validator.descend(instance, subschema) + + +def type(validator, types, instance, schema): + types = ensure_list(types) + + if not any(validator.is_type(instance, type) for type in types): + reprs = ", ".join(repr(type) for type in types) + yield ValidationError(f"{instance!r} is not of type {reprs}") + + +def properties(validator, properties, instance, schema): + if not validator.is_type(instance, "object"): + return + + for property, subschema in properties.items(): + if property in instance: + yield from validator.descend( + instance[property], + subschema, + path=property, + schema_path=property, + ) + + +def required(validator, required, instance, schema): + if not validator.is_type(instance, "object"): + return + for property in required: + if property not in instance: + yield ValidationError(f"{property!r} is a required property") + + +def minProperties(validator, mP, instance, schema): + if validator.is_type(instance, "object") and len(instance) < mP: + yield ValidationError(f"{instance!r} does not have enough properties") + + +def maxProperties(validator, mP, instance, schema): + if not validator.is_type(instance, "object"): + return + if validator.is_type(instance, "object") and len(instance) > mP: + yield ValidationError(f"{instance!r} has too many properties") + + +def allOf(validator, allOf, instance, schema): + for index, subschema in enumerate(allOf): + yield from validator.descend(instance, subschema, schema_path=index) + + +def anyOf(validator, anyOf, instance, schema): + all_errors = [] + for index, subschema in enumerate(anyOf): + errs = list(validator.descend(instance, subschema, schema_path=index)) + if not errs: + break + all_errors.extend(errs) + else: + yield ValidationError( + f"{instance!r} is not valid under any of the given schemas", + context=all_errors, + ) + + +def oneOf(validator, oneOf, instance, schema): + subschemas = enumerate(oneOf) + all_errors = [] + for index, subschema in subschemas: + errs = list(validator.descend(instance, subschema, schema_path=index)) + 
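+        # A subschema that produced no errors is a match; remember it so that any further match can be reported as a oneOf violation below.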
if not errs: + first_valid = subschema + break + all_errors.extend(errs) + else: + yield ValidationError( + f"{instance!r} is not valid under any of the given schemas", + context=all_errors, + ) + + more_valid = [ + each for _, each in subschemas + if validator.evolve(schema=each).is_valid(instance) + ] + if more_valid: + more_valid.append(first_valid) + reprs = ", ".join(repr(schema) for schema in more_valid) + yield ValidationError(f"{instance!r} is valid under each of {reprs}") + + +def not_(validator, not_schema, instance, schema): + if validator.evolve(schema=not_schema).is_valid(instance): + message = f"{instance!r} should not be valid under {not_schema!r}" + yield ValidationError(message) + + +def if_(validator, if_schema, instance, schema): + if validator.evolve(schema=if_schema).is_valid(instance): + if "then" in schema: + then = schema["then"] + yield from validator.descend(instance, then, schema_path="then") + elif "else" in schema: + else_ = schema["else"] + yield from validator.descend(instance, else_, schema_path="else") + + +def unevaluatedItems(validator, unevaluatedItems, instance, schema): + if not validator.is_type(instance, "array"): + return + evaluated_item_indexes = find_evaluated_item_indexes_by_schema( + validator, instance, schema, + ) + unevaluated_items = [ + item for index, item in enumerate(instance) + if index not in evaluated_item_indexes + ] + if unevaluated_items: + error = "Unevaluated items are not allowed (%s %s unexpected)" + yield ValidationError(error % extras_msg(unevaluated_items)) + + +def unevaluatedProperties(validator, unevaluatedProperties, instance, schema): + if not validator.is_type(instance, "object"): + return + evaluated_property_keys = find_evaluated_property_keys_by_schema( + validator, instance, schema, + ) + unevaluated_property_keys = [] + for property in instance: + if property not in evaluated_property_keys: + for _ in validator.descend( + instance[property], + unevaluatedProperties, + path=property, + schema_path=property, + ): + unevaluated_property_keys.append(property) + + if unevaluated_property_keys: + error = "Unevaluated properties are not allowed (%s %s unexpected)" + yield ValidationError(error % extras_msg(unevaluated_property_keys)) + + +def prefixItems(validator, prefixItems, instance, schema): + if not validator.is_type(instance, "array"): + return + + for (index, item), subschema in zip(enumerate(instance), prefixItems): + yield from validator.descend( + instance=item, + schema=subschema, + schema_path=index, + path=index, + ) diff --git a/src/poetry/core/_vendor/jsonschema/benchmarks/__init__.py b/src/poetry/core/_vendor/jsonschema/benchmarks/__init__.py new file mode 100644 index 0000000..e3dcc68 --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/benchmarks/__init__.py @@ -0,0 +1,5 @@ +""" +Benchmarks for validation. + +This package is *not* public API. +""" diff --git a/src/poetry/core/_vendor/jsonschema/benchmarks/issue232.py b/src/poetry/core/_vendor/jsonschema/benchmarks/issue232.py new file mode 100644 index 0000000..bf357e9 --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/benchmarks/issue232.py @@ -0,0 +1,25 @@ +""" +A performance benchmark using the example from issue #232. + +See https://github.com/python-jsonschema/jsonschema/pull/232. 
+""" +from pathlib import Path + +from pyperf import Runner +from pyrsistent import m + +from jsonschema.tests._suite import Version +import jsonschema + +issue232 = Version( + path=Path(__file__).parent / "issue232", + remotes=m(), + name="issue232", +) + + +if __name__ == "__main__": + issue232.benchmark( + runner=Runner(), + Validator=jsonschema.Draft4Validator, + ) diff --git a/src/poetry/core/_vendor/jsonschema/benchmarks/issue232/issue.json b/src/poetry/core/_vendor/jsonschema/benchmarks/issue232/issue.json new file mode 100644 index 0000000..804c340 --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/benchmarks/issue232/issue.json @@ -0,0 +1,2653 @@ +[ + { + "description": "Petstore", + "schema": { + "title": "A JSON Schema for Swagger 2.0 API.", + "id": "http://swagger.io/v2/schema.json#", + "$schema": "http://json-schema.org/draft-04/schema#", + "type": "object", + "required": [ + "swagger", + "info", + "paths" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "swagger": { + "type": "string", + "enum": [ + "2.0" + ], + "description": "The Swagger version of this document." + }, + "info": { + "$ref": "#/definitions/info" + }, + "host": { + "type": "string", + "pattern": "^[^{}/ :\\\\]+(?::\\d+)?$", + "description": "The host (name or ip) of the API. Example: 'swagger.io'" + }, + "basePath": { + "type": "string", + "pattern": "^/", + "description": "The base path to the API. Example: '/api'." + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "consumes": { + "description": "A list of MIME types accepted by the API.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "paths": { + "$ref": "#/definitions/paths" + }, + "definitions": { + "$ref": "#/definitions/definitions" + }, + "parameters": { + "$ref": "#/definitions/parameterDefinitions" + }, + "responses": { + "$ref": "#/definitions/responseDefinitions" + }, + "security": { + "$ref": "#/definitions/security" + }, + "securityDefinitions": { + "$ref": "#/definitions/securityDefinitions" + }, + "tags": { + "type": "array", + "items": { + "$ref": "#/definitions/tag" + }, + "uniqueItems": true + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "definitions": { + "info": { + "type": "object", + "description": "General information about the API.", + "required": [ + "version", + "title" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "title": { + "type": "string", + "description": "A unique and precise title of the API." + }, + "version": { + "type": "string", + "description": "A semantic version number of the API." + }, + "description": { + "type": "string", + "description": "A longer description of the API. Should be different from the title. GitHub Flavored Markdown is allowed." + }, + "termsOfService": { + "type": "string", + "description": "The terms of service for the API." 
+ }, + "contact": { + "$ref": "#/definitions/contact" + }, + "license": { + "$ref": "#/definitions/license" + } + } + }, + "contact": { + "type": "object", + "description": "Contact information for the owners of the API.", + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The identifying name of the contact person/organization." + }, + "url": { + "type": "string", + "description": "The URL pointing to the contact information.", + "format": "uri" + }, + "email": { + "type": "string", + "description": "The email address of the contact person/organization.", + "format": "email" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "license": { + "type": "object", + "required": [ + "name" + ], + "additionalProperties": false, + "properties": { + "name": { + "type": "string", + "description": "The name of the license type. It's encouraged to use an OSI compatible license." + }, + "url": { + "type": "string", + "description": "The URL pointing to the license.", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "paths": { + "type": "object", + "description": "Relative paths to the individual endpoints. They must be relative to the 'basePath'.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + }, + "^/": { + "$ref": "#/definitions/pathItem" + } + }, + "additionalProperties": false + }, + "definitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "description": "One or more JSON objects describing the schemas being consumed and produced by the API." + }, + "parameterDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/parameter" + }, + "description": "One or more JSON representations for parameters" + }, + "responseDefinitions": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/response" + }, + "description": "One or more JSON representations for parameters" + }, + "externalDocs": { + "type": "object", + "additionalProperties": false, + "description": "information about external documentation", + "required": [ + "url" + ], + "properties": { + "description": { + "type": "string" + }, + "url": { + "type": "string", + "format": "uri" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "examples": { + "type": "object", + "additionalProperties": true + }, + "mimeType": { + "type": "string", + "description": "The MIME type of the HTTP message." + }, + "operation": { + "type": "object", + "required": [ + "responses" + ], + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "tags": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + }, + "summary": { + "type": "string", + "description": "A brief summary of the operation." + }, + "description": { + "type": "string", + "description": "A longer description of the operation, GitHub Flavored Markdown is allowed." + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "operationId": { + "type": "string", + "description": "A unique identifier of the operation." 
+ }, + "produces": { + "description": "A list of MIME types the API can produce.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "consumes": { + "description": "A list of MIME types the API can consume.", + "allOf": [ + { + "$ref": "#/definitions/mediaTypeList" + } + ] + }, + "parameters": { + "$ref": "#/definitions/parametersList" + }, + "responses": { + "$ref": "#/definitions/responses" + }, + "schemes": { + "$ref": "#/definitions/schemesList" + }, + "deprecated": { + "type": "boolean", + "default": false + }, + "security": { + "$ref": "#/definitions/security" + } + } + }, + "pathItem": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "get": { + "$ref": "#/definitions/operation" + }, + "put": { + "$ref": "#/definitions/operation" + }, + "post": { + "$ref": "#/definitions/operation" + }, + "delete": { + "$ref": "#/definitions/operation" + }, + "options": { + "$ref": "#/definitions/operation" + }, + "head": { + "$ref": "#/definitions/operation" + }, + "patch": { + "$ref": "#/definitions/operation" + }, + "parameters": { + "$ref": "#/definitions/parametersList" + } + } + }, + "responses": { + "type": "object", + "description": "Response objects names can either be any valid HTTP status code or 'default'.", + "minProperties": 1, + "additionalProperties": false, + "patternProperties": { + "^([0-9]{3})$|^(default)$": { + "$ref": "#/definitions/responseValue" + }, + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "not": { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + } + }, + "responseValue": { + "oneOf": [ + { + "$ref": "#/definitions/response" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "response": { + "type": "object", + "required": [ + "description" + ], + "properties": { + "description": { + "type": "string" + }, + "schema": { + "oneOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "$ref": "#/definitions/fileSchema" + } + ] + }, + "headers": { + "$ref": "#/definitions/headers" + }, + "examples": { + "$ref": "#/definitions/examples" + } + }, + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "headers": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/header" + } + }, + "header": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": 
"#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "vendorExtension": { + "description": "Any property starting with x- is valid.", + "additionalProperties": true, + "additionalItems": true + }, + "bodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "schema" + ], + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "body" + ] + }, + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "schema": { + "$ref": "#/definitions/schema" + } + }, + "additionalProperties": false + }, + "headerParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "header" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "queryParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "query" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. 
This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." + }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "formDataParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "required": { + "type": "boolean", + "description": "Determines whether or not this parameter is required or optional.", + "default": false + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "formData" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." + }, + "allowEmptyValue": { + "type": "boolean", + "default": false, + "description": "allows sending a parameter by name only or with an empty value." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array", + "file" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormatWithMulti" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "pathParameterSubSchema": { + "additionalProperties": false, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "required" + ], + "properties": { + "required": { + "type": "boolean", + "enum": [ + true + ], + "description": "Determines whether or not this parameter is required or optional." + }, + "in": { + "type": "string", + "description": "Determines the location of the parameter.", + "enum": [ + "path" + ] + }, + "description": { + "type": "string", + "description": "A brief description of the parameter. This could contain examples of use. GitHub Flavored Markdown is allowed." + }, + "name": { + "type": "string", + "description": "The name of the parameter." 
+ }, + "type": { + "type": "string", + "enum": [ + "string", + "number", + "boolean", + "integer", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + } + }, + "nonBodyParameter": { + "type": "object", + "required": [ + "name", + "in", + "type" + ], + "oneOf": [ + { + "$ref": "#/definitions/headerParameterSubSchema" + }, + { + "$ref": "#/definitions/formDataParameterSubSchema" + }, + { + "$ref": "#/definitions/queryParameterSubSchema" + }, + { + "$ref": "#/definitions/pathParameterSubSchema" + } + ] + }, + "parameter": { + "oneOf": [ + { + "$ref": "#/definitions/bodyParameter" + }, + { + "$ref": "#/definitions/nonBodyParameter" + } + ] + }, + "schema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "properties": { + "$ref": { + "type": "string" + }, + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "maxProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minProperties": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "enum": { + "$ref": 
"http://json-schema.org/draft-04/schema#/properties/enum" + }, + "additionalProperties": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "boolean" + } + ], + "default": {} + }, + "type": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/type" + }, + "items": { + "anyOf": [ + { + "$ref": "#/definitions/schema" + }, + { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + } + ], + "default": {} + }, + "allOf": { + "type": "array", + "minItems": 1, + "items": { + "$ref": "#/definitions/schema" + } + }, + "properties": { + "type": "object", + "additionalProperties": { + "$ref": "#/definitions/schema" + }, + "default": {} + }, + "discriminator": { + "type": "string" + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "xml": { + "$ref": "#/definitions/xml" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "fileSchema": { + "type": "object", + "description": "A deterministic version of a JSON Schema object.", + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + }, + "required": [ + "type" + ], + "properties": { + "format": { + "type": "string" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "required": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/stringArray" + }, + "type": { + "type": "string", + "enum": [ + "file" + ] + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + }, + "example": {} + }, + "additionalProperties": false + }, + "primitivesItems": { + "type": "object", + "additionalProperties": false, + "properties": { + "type": { + "type": "string", + "enum": [ + "string", + "number", + "integer", + "boolean", + "array" + ] + }, + "format": { + "type": "string" + }, + "items": { + "$ref": "#/definitions/primitivesItems" + }, + "collectionFormat": { + "$ref": "#/definitions/collectionFormat" + }, + "default": { + "$ref": "#/definitions/default" + }, + "maximum": { + "$ref": "#/definitions/maximum" + }, + "exclusiveMaximum": { + "$ref": "#/definitions/exclusiveMaximum" + }, + "minimum": { + "$ref": "#/definitions/minimum" + }, + "exclusiveMinimum": { + "$ref": "#/definitions/exclusiveMinimum" + }, + "maxLength": { + "$ref": "#/definitions/maxLength" + }, + "minLength": { + "$ref": "#/definitions/minLength" + }, + "pattern": { + "$ref": "#/definitions/pattern" + }, + "maxItems": { + "$ref": "#/definitions/maxItems" + }, + "minItems": { + "$ref": "#/definitions/minItems" + }, + "uniqueItems": { + "$ref": "#/definitions/uniqueItems" + }, + "enum": { + "$ref": "#/definitions/enum" + }, + "multipleOf": { + "$ref": "#/definitions/multipleOf" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "security": { + "type": "array", + "items": { + "$ref": "#/definitions/securityRequirement" + }, + "uniqueItems": true + }, + "securityRequirement": { + "type": "object", + "additionalProperties": { + "type": "array", + "items": { + "type": "string" + }, + "uniqueItems": true + } + }, + "xml": { + "type": "object", + "additionalProperties": false, + "properties": { + "name": { + "type": "string" + }, + "namespace": { + "type": "string" + }, + 
"prefix": { + "type": "string" + }, + "attribute": { + "type": "boolean", + "default": false + }, + "wrapped": { + "type": "boolean", + "default": false + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "tag": { + "type": "object", + "additionalProperties": false, + "required": [ + "name" + ], + "properties": { + "name": { + "type": "string" + }, + "description": { + "type": "string" + }, + "externalDocs": { + "$ref": "#/definitions/externalDocs" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "securityDefinitions": { + "type": "object", + "additionalProperties": { + "oneOf": [ + { + "$ref": "#/definitions/basicAuthenticationSecurity" + }, + { + "$ref": "#/definitions/apiKeySecurity" + }, + { + "$ref": "#/definitions/oauth2ImplicitSecurity" + }, + { + "$ref": "#/definitions/oauth2PasswordSecurity" + }, + { + "$ref": "#/definitions/oauth2ApplicationSecurity" + }, + { + "$ref": "#/definitions/oauth2AccessCodeSecurity" + } + ] + } + }, + "basicAuthenticationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "basic" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "apiKeySecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "name", + "in" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "apiKey" + ] + }, + "name": { + "type": "string" + }, + "in": { + "type": "string", + "enum": [ + "header", + "query" + ] + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ImplicitSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "implicit" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2PasswordSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "password" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2ApplicationSecurity": { + "type": "object", + "additionalProperties": false, + "required": [ + "type", + "flow", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "application" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2AccessCodeSecurity": { + "type": "object", + 
"additionalProperties": false, + "required": [ + "type", + "flow", + "authorizationUrl", + "tokenUrl" + ], + "properties": { + "type": { + "type": "string", + "enum": [ + "oauth2" + ] + }, + "flow": { + "type": "string", + "enum": [ + "accessCode" + ] + }, + "scopes": { + "$ref": "#/definitions/oauth2Scopes" + }, + "authorizationUrl": { + "type": "string", + "format": "uri" + }, + "tokenUrl": { + "type": "string", + "format": "uri" + }, + "description": { + "type": "string" + } + }, + "patternProperties": { + "^x-": { + "$ref": "#/definitions/vendorExtension" + } + } + }, + "oauth2Scopes": { + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "mediaTypeList": { + "type": "array", + "items": { + "$ref": "#/definitions/mimeType" + }, + "uniqueItems": true + }, + "parametersList": { + "type": "array", + "description": "The parameters needed to send a valid API call.", + "additionalItems": false, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/parameter" + }, + { + "$ref": "#/definitions/jsonReference" + } + ] + }, + "uniqueItems": true + }, + "schemesList": { + "type": "array", + "description": "The transfer protocol of the API.", + "items": { + "type": "string", + "enum": [ + "http", + "https", + "ws", + "wss" + ] + }, + "uniqueItems": true + }, + "collectionFormat": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes" + ], + "default": "csv" + }, + "collectionFormatWithMulti": { + "type": "string", + "enum": [ + "csv", + "ssv", + "tsv", + "pipes", + "multi" + ], + "default": "csv" + }, + "title": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/title" + }, + "description": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/description" + }, + "default": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/default" + }, + "multipleOf": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/multipleOf" + }, + "maximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/maximum" + }, + "exclusiveMaximum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMaximum" + }, + "minimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/minimum" + }, + "exclusiveMinimum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/exclusiveMinimum" + }, + "maxLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minLength": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "pattern": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/pattern" + }, + "maxItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveInteger" + }, + "minItems": { + "$ref": "http://json-schema.org/draft-04/schema#/definitions/positiveIntegerDefault0" + }, + "uniqueItems": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/uniqueItems" + }, + "enum": { + "$ref": "http://json-schema.org/draft-04/schema#/properties/enum" + }, + "jsonReference": { + "type": "object", + "required": [ + "$ref" + ], + "additionalProperties": false, + "properties": { + "$ref": { + "type": "string" + } + } + } + } + }, + "tests": [ + { + "description": "Example petsore", + "data": { + "swagger": "2.0", + "info": { + "description": "This is a sample server Petstore server. You can find out more about Swagger at [http://swagger.io](http://swagger.io) or on [irc.freenode.net, #swagger](http://swagger.io/irc/). 
For this sample, you can use the api key `special-key` to test the authorization filters.", + "version": "1.0.0", + "title": "Swagger Petstore", + "termsOfService": "http://swagger.io/terms/", + "contact": { + "email": "apiteam@swagger.io" + }, + "license": { + "name": "Apache 2.0", + "url": "http://www.apache.org/licenses/LICENSE-2.0.html" + } + }, + "host": "petstore.swagger.io", + "basePath": "/v2", + "tags": [ + { + "name": "pet", + "description": "Everything about your Pets", + "externalDocs": { + "description": "Find out more", + "url": "http://swagger.io" + } + }, + { + "name": "store", + "description": "Access to Petstore orders" + }, + { + "name": "user", + "description": "Operations about user", + "externalDocs": { + "description": "Find out more about our store", + "url": "http://swagger.io" + } + } + ], + "schemes": [ + "http" + ], + "paths": { + "/pet": { + "post": { + "tags": [ + "pet" + ], + "summary": "Add a new pet to the store", + "description": "", + "operationId": "addPet", + "consumes": [ + "application/json", + "application/xml" + ], + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "Pet object that needs to be added to the store", + "required": true, + "schema": { + "$ref": "#/definitions/Pet" + } + } + ], + "responses": { + "405": { + "description": "Invalid input" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + }, + "put": { + "tags": [ + "pet" + ], + "summary": "Update an existing pet", + "description": "", + "operationId": "updatePet", + "consumes": [ + "application/json", + "application/xml" + ], + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "Pet object that needs to be added to the store", + "required": true, + "schema": { + "$ref": "#/definitions/Pet" + } + } + ], + "responses": { + "400": { + "description": "Invalid ID supplied" + }, + "404": { + "description": "Pet not found" + }, + "405": { + "description": "Validation exception" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + } + }, + "/pet/findByStatus": { + "get": { + "tags": [ + "pet" + ], + "summary": "Finds Pets by status", + "description": "Multiple status values can be provided with comma separated strings", + "operationId": "findPetsByStatus", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "status", + "in": "query", + "description": "Status values that need to be considered for filter", + "required": true, + "type": "array", + "items": { + "type": "string", + "enum": [ + "available", + "pending", + "sold" + ], + "default": "available" + }, + "collectionFormat": "multi" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Pet" + } + } + }, + "400": { + "description": "Invalid status value" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + } + }, + "/pet/findByTags": { + "get": { + "tags": [ + "pet" + ], + "summary": "Finds Pets by tags", + "description": "Muliple tags can be provided with comma separated strings. 
Use tag1, tag2, tag3 for testing.", + "operationId": "findPetsByTags", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "tags", + "in": "query", + "description": "Tags to filter by", + "required": true, + "type": "array", + "items": { + "type": "string" + }, + "collectionFormat": "multi" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Pet" + } + } + }, + "400": { + "description": "Invalid tag value" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ], + "deprecated": true + } + }, + "/pet/{petId}": { + "get": { + "tags": [ + "pet" + ], + "summary": "Find pet by ID", + "description": "Returns a single pet", + "operationId": "getPetById", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "petId", + "in": "path", + "description": "ID of pet to return", + "required": true, + "type": "integer", + "format": "int64" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "$ref": "#/definitions/Pet" + } + }, + "400": { + "description": "Invalid ID supplied" + }, + "404": { + "description": "Pet not found" + } + }, + "security": [ + { + "api_key": [] + } + ] + }, + "post": { + "tags": [ + "pet" + ], + "summary": "Updates a pet in the store with form data", + "description": "", + "operationId": "updatePetWithForm", + "consumes": [ + "application/x-www-form-urlencoded" + ], + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "petId", + "in": "path", + "description": "ID of pet that needs to be updated", + "required": true, + "type": "integer", + "format": "int64" + }, + { + "name": "name", + "in": "formData", + "description": "Updated name of the pet", + "required": false, + "type": "string" + }, + { + "name": "status", + "in": "formData", + "description": "Updated status of the pet", + "required": false, + "type": "string" + } + ], + "responses": { + "405": { + "description": "Invalid input" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + }, + "delete": { + "tags": [ + "pet" + ], + "summary": "Deletes a pet", + "description": "", + "operationId": "deletePet", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "api_key", + "in": "header", + "required": false, + "type": "string" + }, + { + "name": "petId", + "in": "path", + "description": "Pet id to delete", + "required": true, + "type": "integer", + "format": "int64" + } + ], + "responses": { + "400": { + "description": "Invalid ID supplied" + }, + "404": { + "description": "Pet not found" + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + } + }, + "/pet/{petId}/uploadImage": { + "post": { + "tags": [ + "pet" + ], + "summary": "uploads an image", + "description": "", + "operationId": "uploadFile", + "consumes": [ + "multipart/form-data" + ], + "produces": [ + "application/json" + ], + "parameters": [ + { + "name": "petId", + "in": "path", + "description": "ID of pet to update", + "required": true, + "type": "integer", + "format": "int64" + }, + { + "name": "additionalMetadata", + "in": "formData", + "description": "Additional data to pass to server", + "required": false, + "type": "string" + }, + { + "name": "file", + "in": "formData", + "description": "file to upload", + "required": false, + "type": "file" + } + 
], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "$ref": "#/definitions/ApiResponse" + } + } + }, + "security": [ + { + "petstore_auth": [ + "write:pets", + "read:pets" + ] + } + ] + } + }, + "/store/inventory": { + "get": { + "tags": [ + "store" + ], + "summary": "Returns pet inventories by status", + "description": "Returns a map of status codes to quantities", + "operationId": "getInventory", + "produces": [ + "application/json" + ], + "parameters": [], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "type": "object", + "additionalProperties": { + "type": "integer", + "format": "int32" + } + } + } + }, + "security": [ + { + "api_key": [] + } + ] + } + }, + "/store/order": { + "post": { + "tags": [ + "store" + ], + "summary": "Place an order for a pet", + "description": "", + "operationId": "placeOrder", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "order placed for purchasing the pet", + "required": true, + "schema": { + "$ref": "#/definitions/Order" + } + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "$ref": "#/definitions/Order" + } + }, + "400": { + "description": "Invalid Order" + } + } + } + }, + "/store/order/{orderId}": { + "get": { + "tags": [ + "store" + ], + "summary": "Find purchase order by ID", + "description": "For valid response try integer IDs with value >= 1 and <= 10. Other values will generated exceptions", + "operationId": "getOrderById", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "orderId", + "in": "path", + "description": "ID of pet that needs to be fetched", + "required": true, + "type": "integer", + "maximum": 10.0, + "minimum": 1.0, + "format": "int64" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "$ref": "#/definitions/Order" + } + }, + "400": { + "description": "Invalid ID supplied" + }, + "404": { + "description": "Order not found" + } + } + }, + "delete": { + "tags": [ + "store" + ], + "summary": "Delete purchase order by ID", + "description": "For valid response try integer IDs with positive integer value. 
Negative or non-integer values will generate API errors", + "operationId": "deleteOrder", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "orderId", + "in": "path", + "description": "ID of the order that needs to be deleted", + "required": true, + "type": "integer", + "minimum": 1.0, + "format": "int64" + } + ], + "responses": { + "400": { + "description": "Invalid ID supplied" + }, + "404": { + "description": "Order not found" + } + } + } + }, + "/user": { + "post": { + "tags": [ + "user" + ], + "summary": "Create user", + "description": "This can only be done by the logged in user.", + "operationId": "createUser", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "Created user object", + "required": true, + "schema": { + "$ref": "#/definitions/User" + } + } + ], + "responses": { + "default": { + "description": "successful operation" + } + } + } + }, + "/user/createWithArray": { + "post": { + "tags": [ + "user" + ], + "summary": "Creates list of users with given input array", + "description": "", + "operationId": "createUsersWithArrayInput", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "List of user object", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/User" + } + } + } + ], + "responses": { + "default": { + "description": "successful operation" + } + } + } + }, + "/user/createWithList": { + "post": { + "tags": [ + "user" + ], + "summary": "Creates list of users with given input array", + "description": "", + "operationId": "createUsersWithListInput", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "in": "body", + "name": "body", + "description": "List of user object", + "required": true, + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/User" + } + } + } + ], + "responses": { + "default": { + "description": "successful operation" + } + } + } + }, + "/user/login": { + "get": { + "tags": [ + "user" + ], + "summary": "Logs user into the system", + "description": "", + "operationId": "loginUser", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "username", + "in": "query", + "description": "The user name for login", + "required": true, + "type": "string" + }, + { + "name": "password", + "in": "query", + "description": "The password for login in clear text", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "type": "string" + }, + "headers": { + "X-Rate-Limit": { + "type": "integer", + "format": "int32", + "description": "calls per hour allowed by the user" + }, + "X-Expires-After": { + "type": "string", + "format": "date-time", + "description": "date in UTC when token expires" + } + } + }, + "400": { + "description": "Invalid username/password supplied" + } + } + } + }, + "/user/logout": { + "get": { + "tags": [ + "user" + ], + "summary": "Logs out current logged in user session", + "description": "", + "operationId": "logoutUser", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [], + "responses": { + "default": { + "description": "successful operation" + } + } + } + }, + "/user/{username}": { + "get": { + "tags": [ + "user" + ], + "summary": "Get user by user name", + "description": "", + "operationId": 
"getUserByName", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "username", + "in": "path", + "description": "The name that needs to be fetched. Use user1 for testing. ", + "required": true, + "type": "string" + } + ], + "responses": { + "200": { + "description": "successful operation", + "schema": { + "$ref": "#/definitions/User" + } + }, + "400": { + "description": "Invalid username supplied" + }, + "404": { + "description": "User not found" + } + } + }, + "put": { + "tags": [ + "user" + ], + "summary": "Updated user", + "description": "This can only be done by the logged in user.", + "operationId": "updateUser", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "username", + "in": "path", + "description": "name that need to be updated", + "required": true, + "type": "string" + }, + { + "in": "body", + "name": "body", + "description": "Updated user object", + "required": true, + "schema": { + "$ref": "#/definitions/User" + } + } + ], + "responses": { + "400": { + "description": "Invalid user supplied" + }, + "404": { + "description": "User not found" + } + } + }, + "delete": { + "tags": [ + "user" + ], + "summary": "Delete user", + "description": "This can only be done by the logged in user.", + "operationId": "deleteUser", + "produces": [ + "application/xml", + "application/json" + ], + "parameters": [ + { + "name": "username", + "in": "path", + "description": "The name that needs to be deleted", + "required": true, + "type": "string" + } + ], + "responses": { + "400": { + "description": "Invalid username supplied" + }, + "404": { + "description": "User not found" + } + } + } + } + }, + "securityDefinitions": { + "petstore_auth": { + "type": "oauth2", + "authorizationUrl": "http://petstore.swagger.io/oauth/dialog", + "flow": "implicit", + "scopes": { + "write:pets": "modify pets in your account", + "read:pets": "read your pets" + } + }, + "api_key": { + "type": "apiKey", + "name": "api_key", + "in": "header" + } + }, + "definitions": { + "Order": { + "type": "object", + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "petId": { + "type": "integer", + "format": "int64" + }, + "quantity": { + "type": "integer", + "format": "int32" + }, + "shipDate": { + "type": "string", + "format": "date-time" + }, + "status": { + "type": "string", + "description": "Order Status", + "enum": [ + "placed", + "approved", + "delivered" + ] + }, + "complete": { + "type": "boolean", + "default": false + } + }, + "xml": { + "name": "Order" + } + }, + "Category": { + "type": "object", + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "name": { + "type": "string" + } + }, + "xml": { + "name": "Category" + } + }, + "User": { + "type": "object", + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "username": { + "type": "string" + }, + "firstName": { + "type": "string" + }, + "lastName": { + "type": "string" + }, + "email": { + "type": "string" + }, + "password": { + "type": "string" + }, + "phone": { + "type": "string" + }, + "userStatus": { + "type": "integer", + "format": "int32", + "description": "User Status" + } + }, + "xml": { + "name": "User" + } + }, + "Tag": { + "type": "object", + "properties": { + "id": { + "type": "integer", + "format": "int64" + }, + "name": { + "type": "string" + } + }, + "xml": { + "name": "Tag" + } + }, + "Pet": { + "type": "object", + "required": [ + "name", + "photoUrls" + ], + "properties": { + "id": { + "type": 
"integer", + "format": "int64" + }, + "category": { + "$ref": "#/definitions/Category" + }, + "name": { + "type": "string", + "example": "doggie" + }, + "photoUrls": { + "type": "array", + "xml": { + "name": "photoUrl", + "wrapped": true + }, + "items": { + "type": "string" + } + }, + "tags": { + "type": "array", + "xml": { + "name": "tag", + "wrapped": true + }, + "items": { + "$ref": "#/definitions/Tag" + } + }, + "status": { + "type": "string", + "description": "pet status in the store", + "enum": [ + "available", + "pending", + "sold" + ] + } + }, + "xml": { + "name": "Pet" + } + }, + "ApiResponse": { + "type": "object", + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "type": { + "type": "string" + }, + "message": { + "type": "string" + } + } + } + }, + "externalDocs": { + "description": "Find out more about Swagger", + "url": "http://swagger.io" + } + }, + "valid": true + } + ] + } +] diff --git a/src/poetry/core/_vendor/jsonschema/benchmarks/json_schema_test_suite.py b/src/poetry/core/_vendor/jsonschema/benchmarks/json_schema_test_suite.py new file mode 100644 index 0000000..905fb6a --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/benchmarks/json_schema_test_suite.py @@ -0,0 +1,12 @@ +""" +A performance benchmark using the official test suite. + +This benchmarks jsonschema using every valid example in the +JSON-Schema-Test-Suite. It will take some time to complete. +""" +from pyperf import Runner + +from jsonschema.tests._suite import Suite + +if __name__ == "__main__": + Suite().benchmark(runner=Runner()) diff --git a/src/poetry/core/_vendor/jsonschema/cli.py b/src/poetry/core/_vendor/jsonschema/cli.py new file mode 100644 index 0000000..f93b5c5 --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/cli.py @@ -0,0 +1,299 @@ +""" +The ``jsonschema`` command line. +""" + +from json import JSONDecodeError +from textwrap import dedent +import argparse +import json +import sys +import traceback +import warnings + +try: + from importlib import metadata +except ImportError: + import importlib_metadata as metadata # type: ignore + +try: + from pkgutil import resolve_name +except ImportError: + from pkgutil_resolve_name import resolve_name # type: ignore + +import attr + +from jsonschema.exceptions import SchemaError +from jsonschema.validators import RefResolver, validator_for + +warnings.warn( + ( + "The jsonschema CLI is deprecated and will be removed in a future " + "version. 
Please use check-jsonschema instead, which can be installed " + "from https://pypi.org/project/check-jsonschema/" + ), + DeprecationWarning, + stacklevel=2, +) + + +class _CannotLoadFile(Exception): + pass + + +@attr.s +class _Outputter: + + _formatter = attr.ib() + _stdout = attr.ib() + _stderr = attr.ib() + + @classmethod + def from_arguments(cls, arguments, stdout, stderr): + if arguments["output"] == "plain": + formatter = _PlainFormatter(arguments["error_format"]) + elif arguments["output"] == "pretty": + formatter = _PrettyFormatter() + return cls(formatter=formatter, stdout=stdout, stderr=stderr) + + def load(self, path): + try: + file = open(path) + except FileNotFoundError: + self.filenotfound_error(path=path, exc_info=sys.exc_info()) + raise _CannotLoadFile() + + with file: + try: + return json.load(file) + except JSONDecodeError: + self.parsing_error(path=path, exc_info=sys.exc_info()) + raise _CannotLoadFile() + + def filenotfound_error(self, **kwargs): + self._stderr.write(self._formatter.filenotfound_error(**kwargs)) + + def parsing_error(self, **kwargs): + self._stderr.write(self._formatter.parsing_error(**kwargs)) + + def validation_error(self, **kwargs): + self._stderr.write(self._formatter.validation_error(**kwargs)) + + def validation_success(self, **kwargs): + self._stdout.write(self._formatter.validation_success(**kwargs)) + + +@attr.s +class _PrettyFormatter: + + _ERROR_MSG = dedent( + """\ + ===[{type}]===({path})=== + + {body} + ----------------------------- + """, + ) + _SUCCESS_MSG = "===[SUCCESS]===({path})===\n" + + def filenotfound_error(self, path, exc_info): + return self._ERROR_MSG.format( + path=path, + type="FileNotFoundError", + body="{!r} does not exist.".format(path), + ) + + def parsing_error(self, path, exc_info): + exc_type, exc_value, exc_traceback = exc_info + exc_lines = "".join( + traceback.format_exception(exc_type, exc_value, exc_traceback), + ) + return self._ERROR_MSG.format( + path=path, + type=exc_type.__name__, + body=exc_lines, + ) + + def validation_error(self, instance_path, error): + return self._ERROR_MSG.format( + path=instance_path, + type=error.__class__.__name__, + body=error, + ) + + def validation_success(self, instance_path): + return self._SUCCESS_MSG.format(path=instance_path) + + +@attr.s +class _PlainFormatter: + + _error_format = attr.ib() + + def filenotfound_error(self, path, exc_info): + return "{!r} does not exist.\n".format(path) + + def parsing_error(self, path, exc_info): + return "Failed to parse {}: {}\n".format( + "" if path == "" else repr(path), + exc_info[1], + ) + + def validation_error(self, instance_path, error): + return self._error_format.format(file_name=instance_path, error=error) + + def validation_success(self, instance_path): + return "" + + +def _resolve_name_with_default(name): + if "." not in name: + name = "jsonschema." + name + return resolve_name(name) + + +parser = argparse.ArgumentParser( + description="JSON Schema Validation CLI", +) +parser.add_argument( + "-i", "--instance", + action="append", + dest="instances", + help=""" + a path to a JSON instance (i.e. filename.json) to validate (may + be specified multiple times). If no instances are provided via this + option, one will be expected on standard input. + """, +) +parser.add_argument( + "-F", "--error-format", + help=""" + the format to use for each validation error message, specified + in a form suitable for str.format. This string will be passed + one formatted object named 'error' for each ValidationError. 
+ Only provide this option when using --output=plain, which is the + default. If this argument is unprovided and --output=plain is + used, a simple default representation will be used. + """, +) +parser.add_argument( + "-o", "--output", + choices=["plain", "pretty"], + default="plain", + help=""" + an output format to use. 'plain' (default) will produce minimal + text with one line for each error, while 'pretty' will produce + more detailed human-readable output on multiple lines. + """, +) +parser.add_argument( + "-V", "--validator", + type=_resolve_name_with_default, + help=""" + the fully qualified object name of a validator to use, or, for + validators that are registered with jsonschema, simply the name + of the class. + """, +) +parser.add_argument( + "--base-uri", + help=""" + a base URI to assign to the provided schema, even if it does not + declare one (via e.g. $id). This option can be used if you wish to + resolve relative references to a particular URI (or local path) + """, +) +parser.add_argument( + "--version", + action="version", + version=metadata.version("jsonschema"), +) +parser.add_argument( + "schema", + help="the path to a JSON Schema to validate with (i.e. schema.json)", +) + + +def parse_args(args): + arguments = vars(parser.parse_args(args=args or ["--help"])) + if arguments["output"] != "plain" and arguments["error_format"]: + raise parser.error( + "--error-format can only be used with --output plain", + ) + if arguments["output"] == "plain" and arguments["error_format"] is None: + arguments["error_format"] = "{error.instance}: {error.message}\n" + return arguments + + +def _validate_instance(instance_path, instance, validator, outputter): + invalid = False + for error in validator.iter_errors(instance): + invalid = True + outputter.validation_error(instance_path=instance_path, error=error) + + if not invalid: + outputter.validation_success(instance_path=instance_path) + return invalid + + +def main(args=sys.argv[1:]): + sys.exit(run(arguments=parse_args(args=args))) + + +def run(arguments, stdout=sys.stdout, stderr=sys.stderr, stdin=sys.stdin): + outputter = _Outputter.from_arguments( + arguments=arguments, + stdout=stdout, + stderr=stderr, + ) + + try: + schema = outputter.load(arguments["schema"]) + except _CannotLoadFile: + return 1 + + if arguments["validator"] is None: + arguments["validator"] = validator_for(schema) + + try: + arguments["validator"].check_schema(schema) + except SchemaError as error: + outputter.validation_error( + instance_path=arguments["schema"], + error=error, + ) + return 1 + + if arguments["instances"]: + load, instances = outputter.load, arguments["instances"] + else: + def load(_): + try: + return json.load(stdin) + except JSONDecodeError: + outputter.parsing_error( + path="", exc_info=sys.exc_info(), + ) + raise _CannotLoadFile() + instances = [""] + + resolver = RefResolver( + base_uri=arguments["base_uri"], + referrer=schema, + ) if arguments["base_uri"] is not None else None + + validator = arguments["validator"](schema, resolver=resolver) + exit_code = 0 + for each in instances: + try: + instance = load(each) + except _CannotLoadFile: + exit_code = 1 + else: + exit_code |= _validate_instance( + instance_path=each, + instance=instance, + validator=validator, + outputter=outputter, + ) + + return exit_code diff --git a/src/poetry/core/_vendor/jsonschema/exceptions.py b/src/poetry/core/_vendor/jsonschema/exceptions.py new file mode 100644 index 0000000..87db3df --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/exceptions.py 
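# --- Illustrative sketch, not part of the vendored upstream sources ---
# The deprecated command line module above can also be driven programmatically
# through the same `parse_args`/`run` helpers it defines. "schema.json" and
# "instance.json" are hypothetical file names used only for this example, and
# the import assumes a standalone `jsonschema` install rather than this
# vendored copy (importing `jsonschema.cli` emits the DeprecationWarning).
import sys
from jsonschema import cli

arguments = cli.parse_args(["--instance", "instance.json", "schema.json"])
exit_code = cli.run(arguments=arguments, stdout=sys.stdout, stderr=sys.stderr)
print(exit_code)  # 0 when every instance validated, non-zero otherwise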
@@ -0,0 +1,396 @@ +""" +Validation errors, and some surrounding helpers. +""" +from __future__ import annotations + +from collections import defaultdict, deque +from pprint import pformat +from textwrap import dedent, indent +import heapq +import itertools + +import attr + +from jsonschema import _utils + +WEAK_MATCHES: frozenset[str] = frozenset(["anyOf", "oneOf"]) +STRONG_MATCHES: frozenset[str] = frozenset() + +_unset = _utils.Unset() + + +class _Error(Exception): + def __init__( + self, + message, + validator=_unset, + path=(), + cause=None, + context=(), + validator_value=_unset, + instance=_unset, + schema=_unset, + schema_path=(), + parent=None, + type_checker=_unset, + ): + super(_Error, self).__init__( + message, + validator, + path, + cause, + context, + validator_value, + instance, + schema, + schema_path, + parent, + ) + self.message = message + self.path = self.relative_path = deque(path) + self.schema_path = self.relative_schema_path = deque(schema_path) + self.context = list(context) + self.cause = self.__cause__ = cause + self.validator = validator + self.validator_value = validator_value + self.instance = instance + self.schema = schema + self.parent = parent + self._type_checker = type_checker + + for error in context: + error.parent = self + + def __repr__(self): + return f"<{self.__class__.__name__}: {self.message!r}>" + + def __str__(self): + essential_for_verbose = ( + self.validator, self.validator_value, self.instance, self.schema, + ) + if any(m is _unset for m in essential_for_verbose): + return self.message + + schema_path = _utils.format_as_index( + container=self._word_for_schema_in_error_message, + indices=list(self.relative_schema_path)[:-1], + ) + instance_path = _utils.format_as_index( + container=self._word_for_instance_in_error_message, + indices=self.relative_path, + ) + prefix = 16 * " " + + return dedent( + f"""\ + {self.message} + + Failed validating {self.validator!r} in {schema_path}: + {indent(pformat(self.schema, width=72), prefix).lstrip()} + + On {instance_path}: + {indent(pformat(self.instance, width=72), prefix).lstrip()} + """.rstrip(), + ) + + @classmethod + def create_from(cls, other): + return cls(**other._contents()) + + @property + def absolute_path(self): + parent = self.parent + if parent is None: + return self.relative_path + + path = deque(self.relative_path) + path.extendleft(reversed(parent.absolute_path)) + return path + + @property + def absolute_schema_path(self): + parent = self.parent + if parent is None: + return self.relative_schema_path + + path = deque(self.relative_schema_path) + path.extendleft(reversed(parent.absolute_schema_path)) + return path + + @property + def json_path(self): + path = "$" + for elem in self.absolute_path: + if isinstance(elem, int): + path += "[" + str(elem) + "]" + else: + path += "." 
+ elem + return path + + def _set(self, type_checker=None, **kwargs): + if type_checker is not None and self._type_checker is _unset: + self._type_checker = type_checker + + for k, v in kwargs.items(): + if getattr(self, k) is _unset: + setattr(self, k, v) + + def _contents(self): + attrs = ( + "message", "cause", "context", "validator", "validator_value", + "path", "schema_path", "instance", "schema", "parent", + ) + return dict((attr, getattr(self, attr)) for attr in attrs) + + def _matches_type(self): + try: + expected = self.schema["type"] + except (KeyError, TypeError): + return False + + if isinstance(expected, str): + return self._type_checker.is_type(self.instance, expected) + + return any( + self._type_checker.is_type(self.instance, expected_type) + for expected_type in expected + ) + + +class ValidationError(_Error): + """ + An instance was invalid under a provided schema. + """ + + _word_for_schema_in_error_message = "schema" + _word_for_instance_in_error_message = "instance" + + +class SchemaError(_Error): + """ + A schema was invalid under its corresponding metaschema. + """ + + _word_for_schema_in_error_message = "metaschema" + _word_for_instance_in_error_message = "schema" + + +@attr.s(hash=True) +class RefResolutionError(Exception): + """ + A ref could not be resolved. + """ + + _cause = attr.ib() + + def __str__(self): + return str(self._cause) + + +class UndefinedTypeCheck(Exception): + """ + A type checker was asked to check a type it did not have registered. + """ + + def __init__(self, type): + self.type = type + + def __str__(self): + return f"Type {self.type!r} is unknown to this type checker" + + +class UnknownType(Exception): + """ + A validator was asked to validate an instance against an unknown type. + """ + + def __init__(self, type, instance, schema): + self.type = type + self.instance = instance + self.schema = schema + + def __str__(self): + prefix = 16 * " " + + return dedent( + f"""\ + Unknown type {self.type!r} for validator with schema: + {indent(pformat(self.schema, width=72), prefix).lstrip()} + + While checking instance: + {indent(pformat(self.instance, width=72), prefix).lstrip()} + """.rstrip(), + ) + + +class FormatError(Exception): + """ + Validating a format failed. + """ + + def __init__(self, message, cause=None): + super(FormatError, self).__init__(message, cause) + self.message = message + self.cause = self.__cause__ = cause + + def __str__(self): + return self.message + + +class ErrorTree: + """ + ErrorTrees make it easier to check which validations failed. + """ + + _instance = _unset + + def __init__(self, errors=()): + self.errors = {} + self._contents = defaultdict(self.__class__) + + for error in errors: + container = self + for element in error.path: + container = container[element] + container.errors[error.validator] = error + + container._instance = error.instance + + def __contains__(self, index): + """ + Check whether ``instance[index]`` has any errors. + """ + + return index in self._contents + + def __getitem__(self, index): + """ + Retrieve the child tree one level down at the given ``index``. + + If the index is not in the instance that this tree corresponds + to and is not known by this tree, whatever error would be raised + by ``instance.__getitem__`` will be propagated (usually this is + some subclass of `LookupError`. 
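# --- Illustrative sketch, not part of the vendored upstream sources ---
# How the ErrorTree indexing described above is typically used: collect the
# errors for an instance, then check which paths failed and on which keyword.
# Assumes a standalone `jsonschema` install rather than this vendored copy.
from jsonschema import Draft202012Validator
from jsonschema.exceptions import ErrorTree

schema = {"properties": {"size": {"type": "integer", "minimum": 1}}}
instance = {"size": 0}

tree = ErrorTree(Draft202012Validator(schema).iter_errors(instance))
print("size" in tree)               # True: the "size" property has errors
print(sorted(tree["size"].errors))  # ['minimum'] -> the keyword that failed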
+ """ + + if self._instance is not _unset and index not in self: + self._instance[index] + return self._contents[index] + + def __setitem__(self, index, value): + """ + Add an error to the tree at the given ``index``. + """ + self._contents[index] = value + + def __iter__(self): + """ + Iterate (non-recursively) over the indices in the instance with errors. + """ + + return iter(self._contents) + + def __len__(self): + """ + Return the `total_errors`. + """ + return self.total_errors + + def __repr__(self): + total = len(self) + errors = "error" if total == 1 else "errors" + return f"<{self.__class__.__name__} ({total} total {errors})>" + + @property + def total_errors(self): + """ + The total number of errors in the entire tree, including children. + """ + + child_errors = sum(len(tree) for _, tree in self._contents.items()) + return len(self.errors) + child_errors + + +def by_relevance(weak=WEAK_MATCHES, strong=STRONG_MATCHES): + """ + Create a key function that can be used to sort errors by relevance. + + Arguments: + weak (set): + a collection of validation keywords to consider to be + "weak". If there are two errors at the same level of the + instance and one is in the set of weak validation keywords, + the other error will take priority. By default, :kw:`anyOf` + and :kw:`oneOf` are considered weak keywords and will be + superseded by other same-level validation errors. + + strong (set): + a collection of validation keywords to consider to be + "strong" + """ + def relevance(error): + validator = error.validator + return ( + -len(error.path), + validator not in weak, + validator in strong, + not error._matches_type(), + ) + return relevance + + +relevance = by_relevance() + + +def best_match(errors, key=relevance): + """ + Try to find an error that appears to be the best match among given errors. + + In general, errors that are higher up in the instance (i.e. for which + `ValidationError.path` is shorter) are considered better matches, + since they indicate "more" is wrong with the instance. + + If the resulting match is either :kw:`oneOf` or :kw:`anyOf`, the + *opposite* assumption is made -- i.e. the deepest error is picked, + since these keywords only need to match once, and any other errors + may not be relevant. + + Arguments: + errors (collections.abc.Iterable): + + the errors to select from. Do not provide a mixture of + errors from different validation attempts (i.e. from + different instances or schemas), since it won't produce + sensical output. + + key (collections.abc.Callable): + + the key to use when sorting errors. See `relevance` and + transitively `by_relevance` for more details (the default is + to sort with the defaults of that function). Changing the + default is only useful if you want to change the function + that rates errors but still want the error context descent + done by this function. + + Returns: + the best matching error, or ``None`` if the iterable was empty + + .. note:: + + This function is a heuristic. Its return value may change for a given + set of inputs from version to version if better heuristics are added. + """ + errors = iter(errors) + best = next(errors, None) + if best is None: + return + best = max(itertools.chain([best], errors), key=key) + + while best.context: + # Calculate the minimum via nsmallest, because we don't recurse if + # all nested errors have the same relevance (i.e. 
if min == max == all) + smallest = heapq.nsmallest(2, best.context, key=key) + if len(smallest) == 2 and key(smallest[0]) == key(smallest[1]): + return best + best = smallest[0] + return best diff --git a/src/poetry/core/_vendor/jsonschema/protocols.py b/src/poetry/core/_vendor/jsonschema/protocols.py new file mode 100644 index 0000000..2a8f00d --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/protocols.py @@ -0,0 +1,224 @@ +""" +typing.Protocol classes for jsonschema interfaces. +""" + +# for reference material on Protocols, see +# https://www.python.org/dev/peps/pep-0544/ + +from __future__ import annotations + +from collections.abc import Callable, Mapping +from typing import TYPE_CHECKING, Any, ClassVar, Iterable +import sys + +# doing these imports with `try ... except ImportError` doesn't pass mypy +# checking because mypy sees `typing._SpecialForm` and +# `typing_extensions._SpecialForm` as incompatible +# +# see: +# https://mypy.readthedocs.io/en/stable/runtime_troubles.html#using-new-additions-to-the-typing-module +# https://github.com/python/mypy/issues/4427 +if sys.version_info >= (3, 8): + from typing import Protocol, runtime_checkable +else: + from typing_extensions import Protocol, runtime_checkable + +# in order for Sphinx to resolve references accurately from type annotations, +# it needs to see names like `jsonschema.TypeChecker` +# therefore, only import at type-checking time (to avoid circular references), +# but use `jsonschema` for any types which will otherwise not be resolvable +if TYPE_CHECKING: + import jsonschema + +from jsonschema.exceptions import ValidationError + +# For code authors working on the validator protocol, these are the three +# use-cases which should be kept in mind: +# +# 1. As a protocol class, it can be used in type annotations to describe the +# available methods and attributes of a validator +# 2. It is the source of autodoc for the validator documentation +# 3. It is runtime_checkable, meaning that it can be used in isinstance() +# checks. +# +# Since protocols are not base classes, isinstance() checking is limited in +# its capabilities. See docs on runtime_checkable for detail + + +@runtime_checkable +class Validator(Protocol): + """ + The protocol to which all validator classes adhere. + + Arguments: + + schema: + + The schema that the validator object will validate with. + It is assumed to be valid, and providing + an invalid schema can lead to undefined behavior. See + `Validator.check_schema` to validate a schema first. + + resolver: + + a resolver that will be used to resolve :kw:`$ref` + properties (JSON references). If unprovided, one will be created. + + format_checker: + + if provided, a checker which will be used to assert about + :kw:`format` properties present in the schema. If unprovided, + *no* format validation is done, and the presence of format + within schemas is strictly informational. Certain formats + require additional packages to be installed in order to assert + against instances. Ensure you've installed `jsonschema` with + its `extra (optional) dependencies ` when + invoking ``pip``. + + .. deprecated:: v4.12.0 + + Subclassing validator classes now explicitly warns this is not part of + their public API. + """ + + #: An object representing the validator's meta schema (the schema that + #: describes valid schemas in the given version). + META_SCHEMA: ClassVar[Mapping] + + #: A mapping of validation keywords (`str`\s) to functions that + #: validate the keyword with that name. 
For more information see + #: `creating-validators`. + VALIDATORS: ClassVar[Mapping] + + #: A `jsonschema.TypeChecker` that will be used when validating + #: :kw:`type` keywords in JSON schemas. + TYPE_CHECKER: ClassVar[jsonschema.TypeChecker] + + #: A `jsonschema.FormatChecker` that will be used when validating + #: :kw:`format` keywords in JSON schemas. + FORMAT_CHECKER: ClassVar[jsonschema.FormatChecker] + + #: A function which given a schema returns its ID. + ID_OF: Callable[[Any], str | None] + + #: The schema that will be used to validate instances + schema: Mapping | bool + + def __init__( + self, + schema: Mapping | bool, + resolver: jsonschema.RefResolver | None = None, + format_checker: jsonschema.FormatChecker | None = None, + ) -> None: + ... + + @classmethod + def check_schema(cls, schema: Mapping | bool) -> None: + """ + Validate the given schema against the validator's `META_SCHEMA`. + + Raises: + + `jsonschema.exceptions.SchemaError`: + + if the schema is invalid + """ + + def is_type(self, instance: Any, type: str) -> bool: + """ + Check if the instance is of the given (JSON Schema) type. + + Arguments: + + instance: + + the value to check + + type: + + the name of a known (JSON Schema) type + + Returns: + + whether the instance is of the given type + + Raises: + + `jsonschema.exceptions.UnknownType`: + + if ``type`` is not a known type + """ + + def is_valid(self, instance: Any) -> bool: + """ + Check if the instance is valid under the current `schema`. + + Returns: + + whether the instance is valid or not + + >>> schema = {"maxItems" : 2} + >>> Draft202012Validator(schema).is_valid([2, 3, 4]) + False + """ + + def iter_errors(self, instance: Any) -> Iterable[ValidationError]: + r""" + Lazily yield each of the validation errors in the given instance. + + >>> schema = { + ... "type" : "array", + ... "items" : {"enum" : [1, 2, 3]}, + ... "maxItems" : 2, + ... } + >>> v = Draft202012Validator(schema) + >>> for error in sorted(v.iter_errors([2, 3, 4]), key=str): + ... print(error.message) + 4 is not one of [1, 2, 3] + [2, 3, 4] is too long + + .. deprecated:: v4.0.0 + + Calling this function with a second schema argument is deprecated. + Use `Validator.evolve` instead. + """ + + def validate(self, instance: Any) -> None: + """ + Check if the instance is valid under the current `schema`. + + Raises: + + `jsonschema.exceptions.ValidationError`: + + if the instance is invalid + + >>> schema = {"maxItems" : 2} + >>> Draft202012Validator(schema).validate([2, 3, 4]) + Traceback (most recent call last): + ... + ValidationError: [2, 3, 4] is too long + """ + + def evolve(self, **kwargs) -> "Validator": + """ + Create a new validator like this one, but with given changes. + + Preserves all other attributes, so can be used to e.g. create a + validator with a different schema but with the same :kw:`$ref` + resolution behavior. + + >>> validator = Draft202012Validator({}) + >>> validator.evolve(schema={"type": "number"}) + Draft202012Validator(schema={'type': 'number'}, format_checker=None) + + The returned object satisfies the validator protocol, but may not + be of the same concrete class! In particular this occurs + when a :kw:`$ref` occurs to a schema with a different + :kw:`$schema` than this one (i.e. for a different draft). + + >>> validator.evolve( + ... schema={"$schema": Draft7Validator.META_SCHEMA["$id"]} + ... 
) + Draft7Validator(schema=..., format_checker=None) + """ diff --git a/src/poetry/core/_vendor/jsonschema/schemas/draft2019-09.json b/src/poetry/core/_vendor/jsonschema/schemas/draft2019-09.json new file mode 100644 index 0000000..2248a0c --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/schemas/draft2019-09.json @@ -0,0 +1,42 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/schema", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true, + "https://json-schema.org/draft/2019-09/vocab/applicator": true, + "https://json-schema.org/draft/2019-09/vocab/validation": true, + "https://json-schema.org/draft/2019-09/vocab/meta-data": true, + "https://json-schema.org/draft/2019-09/vocab/format": false, + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "properties": { + "definitions": { + "$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.", + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + } + } + } +} diff --git a/src/poetry/core/_vendor/jsonschema/schemas/draft2020-12.json b/src/poetry/core/_vendor/jsonschema/schemas/draft2020-12.json new file mode 100644 index 0000000..d5e2d31 --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/schemas/draft2020-12.json @@ -0,0 +1,58 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/schema", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true, + "https://json-schema.org/draft/2020-12/vocab/applicator": true, + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, + "https://json-schema.org/draft/2020-12/vocab/validation": true, + "https://json-schema.org/draft/2020-12/vocab/meta-data": true, + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/unevaluated"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format-annotation"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.", + "properties": { + "definitions": { + "$comment": "\"definitions\" has been replaced by \"$defs\".", + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "deprecated": true, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" has been 
split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$dynamicRef": "#meta" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + }, + "deprecated": true, + "default": {} + }, + "$recursiveAnchor": { + "$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".", + "$ref": "meta/core#/$defs/anchorString", + "deprecated": true + }, + "$recursiveRef": { + "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".", + "$ref": "meta/core#/$defs/uriReferenceString", + "deprecated": true + } + } +} diff --git a/src/poetry/core/_vendor/jsonschema/schemas/draft3.json b/src/poetry/core/_vendor/jsonschema/schemas/draft3.json new file mode 100644 index 0000000..8b26b1f --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/schemas/draft3.json @@ -0,0 +1,172 @@ +{ + "$schema" : "http://json-schema.org/draft-03/schema#", + "id" : "http://json-schema.org/draft-03/schema#", + "type" : "object", + + "properties" : { + "type" : { + "type" : ["string", "array"], + "items" : { + "type" : ["string", {"$ref" : "#"}] + }, + "uniqueItems" : true, + "default" : "any" + }, + + "properties" : { + "type" : "object", + "additionalProperties" : {"$ref" : "#"}, + "default" : {} + }, + + "patternProperties" : { + "type" : "object", + "additionalProperties" : {"$ref" : "#"}, + "default" : {} + }, + + "additionalProperties" : { + "type" : [{"$ref" : "#"}, "boolean"], + "default" : {} + }, + + "items" : { + "type" : [{"$ref" : "#"}, "array"], + "items" : {"$ref" : "#"}, + "default" : {} + }, + + "additionalItems" : { + "type" : [{"$ref" : "#"}, "boolean"], + "default" : {} + }, + + "required" : { + "type" : "boolean", + "default" : false + }, + + "dependencies" : { + "type" : "object", + "additionalProperties" : { + "type" : ["string", "array", {"$ref" : "#"}], + "items" : { + "type" : "string" + } + }, + "default" : {} + }, + + "minimum" : { + "type" : "number" + }, + + "maximum" : { + "type" : "number" + }, + + "exclusiveMinimum" : { + "type" : "boolean", + "default" : false + }, + + "exclusiveMaximum" : { + "type" : "boolean", + "default" : false + }, + + "minItems" : { + "type" : "integer", + "minimum" : 0, + "default" : 0 + }, + + "maxItems" : { + "type" : "integer", + "minimum" : 0 + }, + + "uniqueItems" : { + "type" : "boolean", + "default" : false + }, + + "pattern" : { + "type" : "string", + "format" : "regex" + }, + + "minLength" : { + "type" : "integer", + "minimum" : 0, + "default" : 0 + }, + + "maxLength" : { + "type" : "integer" + }, + + "enum" : { + "type" : "array", + "minItems" : 1, + "uniqueItems" : true + }, + + "default" : { + "type" : "any" + }, + + "title" : { + "type" : "string" + }, + + "description" : { + "type" : "string" + }, + + "format" : { + "type" : "string" + }, + + "divisibleBy" : { + "type" : "number", + "minimum" : 0, + "exclusiveMinimum" : true, + "default" : 1 + }, + + "disallow" : { + "type" : ["string", "array"], + "items" : { + "type" : ["string", {"$ref" : "#"}] + }, + "uniqueItems" : true + }, + + "extends" : { + "type" : [{"$ref" : "#"}, "array"], + "items" : {"$ref" : "#"}, + "default" : {} + }, + + "id" : { + "type" : "string" + }, + + "$ref" : { + "type" : "string" + }, + + "$schema" : { + "type" : "string", + "format" : "uri" + } + }, + + "dependencies" : { + "exclusiveMinimum" : "minimum", + "exclusiveMaximum" : "maximum" + }, + + "default" : {} +} diff --git a/src/poetry/core/_vendor/jsonschema/schemas/draft4.json 
b/src/poetry/core/_vendor/jsonschema/schemas/draft4.json new file mode 100644 index 0000000..bcbb847 --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/schemas/draft4.json @@ -0,0 +1,149 @@ +{ + "id": "http://json-schema.org/draft-04/schema#", + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string" + }, + "$schema": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/src/poetry/core/_vendor/jsonschema/schemas/draft6.json b/src/poetry/core/_vendor/jsonschema/schemas/draft6.json new file mode 100644 index 0000000..a0d2bf7 --- /dev/null +++ 
b/src/poetry/core/_vendor/jsonschema/schemas/draft6.json @@ -0,0 +1,153 @@ +{ + "$schema": "http://json-schema.org/draft-06/schema#", + "$id": "http://json-schema.org/draft-06/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "examples": { + "type": "array", + "items": {} + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": {}, + "enum": { + "type": "array" + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": {} +} diff --git a/src/poetry/core/_vendor/jsonschema/schemas/draft7.json b/src/poetry/core/_vendor/jsonschema/schemas/draft7.json new file mode 100644 index 0000000..746cde9 --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/schemas/draft7.json @@ -0,0 +1,166 @@ +{ + "$schema": 
"http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$comment": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "readOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": true + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "if": {"$ref": "#"}, + "then": {"$ref": "#"}, + "else": {"$ref": "#"}, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": true +} diff --git a/src/poetry/core/_vendor/jsonschema/schemas/vocabularies.json 
b/src/poetry/core/_vendor/jsonschema/schemas/vocabularies.json new file mode 100644 index 0000000..bca1705 --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/schemas/vocabularies.json @@ -0,0 +1 @@ +{"https://json-schema.org/draft/2020-12/meta/content": {"$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://json-schema.org/draft/2020-12/meta/content", "$vocabulary": {"https://json-schema.org/draft/2020-12/vocab/content": true}, "$dynamicAnchor": "meta", "title": "Content vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"contentEncoding": {"type": "string"}, "contentMediaType": {"type": "string"}, "contentSchema": {"$dynamicRef": "#meta"}}}, "https://json-schema.org/draft/2020-12/meta/unevaluated": {"$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated", "$vocabulary": {"https://json-schema.org/draft/2020-12/vocab/unevaluated": true}, "$dynamicAnchor": "meta", "title": "Unevaluated applicator vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"unevaluatedItems": {"$dynamicRef": "#meta"}, "unevaluatedProperties": {"$dynamicRef": "#meta"}}}, "https://json-schema.org/draft/2020-12/meta/format-annotation": {"$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://json-schema.org/draft/2020-12/meta/format-annotation", "$vocabulary": {"https://json-schema.org/draft/2020-12/vocab/format-annotation": true}, "$dynamicAnchor": "meta", "title": "Format vocabulary meta-schema for annotation results", "type": ["object", "boolean"], "properties": {"format": {"type": "string"}}}, "https://json-schema.org/draft/2020-12/meta/applicator": {"$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://json-schema.org/draft/2020-12/meta/applicator", "$vocabulary": {"https://json-schema.org/draft/2020-12/vocab/applicator": true}, "$dynamicAnchor": "meta", "title": "Applicator vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"prefixItems": {"$ref": "#/$defs/schemaArray"}, "items": {"$dynamicRef": "#meta"}, "contains": {"$dynamicRef": "#meta"}, "additionalProperties": {"$dynamicRef": "#meta"}, "properties": {"type": "object", "additionalProperties": {"$dynamicRef": "#meta"}, "default": {}}, "patternProperties": {"type": "object", "additionalProperties": {"$dynamicRef": "#meta"}, "propertyNames": {"format": "regex"}, "default": {}}, "dependentSchemas": {"type": "object", "additionalProperties": {"$dynamicRef": "#meta"}, "default": {}}, "propertyNames": {"$dynamicRef": "#meta"}, "if": {"$dynamicRef": "#meta"}, "then": {"$dynamicRef": "#meta"}, "else": {"$dynamicRef": "#meta"}, "allOf": {"$ref": "#/$defs/schemaArray"}, "anyOf": {"$ref": "#/$defs/schemaArray"}, "oneOf": {"$ref": "#/$defs/schemaArray"}, "not": {"$dynamicRef": "#meta"}}, "$defs": {"schemaArray": {"type": "array", "minItems": 1, "items": {"$dynamicRef": "#meta"}}}}, "https://json-schema.org/draft/2020-12/meta/meta-data": {"$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://json-schema.org/draft/2020-12/meta/meta-data", "$vocabulary": {"https://json-schema.org/draft/2020-12/vocab/meta-data": true}, "$dynamicAnchor": "meta", "title": "Meta-data vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"title": {"type": "string"}, "description": {"type": "string"}, "default": true, "deprecated": {"type": "boolean", "default": false}, "readOnly": {"type": "boolean", "default": false}, "writeOnly": {"type": "boolean", "default": false}, 
"examples": {"type": "array", "items": true}}}, "https://json-schema.org/draft/2020-12/meta/core": {"$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://json-schema.org/draft/2020-12/meta/core", "$vocabulary": {"https://json-schema.org/draft/2020-12/vocab/core": true}, "$dynamicAnchor": "meta", "title": "Core vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"$id": {"$ref": "#/$defs/uriReferenceString", "$comment": "Non-empty fragments not allowed.", "pattern": "^[^#]*#?$"}, "$schema": {"$ref": "#/$defs/uriString"}, "$ref": {"$ref": "#/$defs/uriReferenceString"}, "$anchor": {"$ref": "#/$defs/anchorString"}, "$dynamicRef": {"$ref": "#/$defs/uriReferenceString"}, "$dynamicAnchor": {"$ref": "#/$defs/anchorString"}, "$vocabulary": {"type": "object", "propertyNames": {"$ref": "#/$defs/uriString"}, "additionalProperties": {"type": "boolean"}}, "$comment": {"type": "string"}, "$defs": {"type": "object", "additionalProperties": {"$dynamicRef": "#meta"}}}, "$defs": {"anchorString": {"type": "string", "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$"}, "uriString": {"type": "string", "format": "uri"}, "uriReferenceString": {"type": "string", "format": "uri-reference"}}}, "https://json-schema.org/draft/2020-12/meta/validation": {"$schema": "https://json-schema.org/draft/2020-12/schema", "$id": "https://json-schema.org/draft/2020-12/meta/validation", "$vocabulary": {"https://json-schema.org/draft/2020-12/vocab/validation": true}, "$dynamicAnchor": "meta", "title": "Validation vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"type": {"anyOf": [{"$ref": "#/$defs/simpleTypes"}, {"type": "array", "items": {"$ref": "#/$defs/simpleTypes"}, "minItems": 1, "uniqueItems": true}]}, "const": true, "enum": {"type": "array", "items": true}, "multipleOf": {"type": "number", "exclusiveMinimum": 0}, "maximum": {"type": "number"}, "exclusiveMaximum": {"type": "number"}, "minimum": {"type": "number"}, "exclusiveMinimum": {"type": "number"}, "maxLength": {"$ref": "#/$defs/nonNegativeInteger"}, "minLength": {"$ref": "#/$defs/nonNegativeIntegerDefault0"}, "pattern": {"type": "string", "format": "regex"}, "maxItems": {"$ref": "#/$defs/nonNegativeInteger"}, "minItems": {"$ref": "#/$defs/nonNegativeIntegerDefault0"}, "uniqueItems": {"type": "boolean", "default": false}, "maxContains": {"$ref": "#/$defs/nonNegativeInteger"}, "minContains": {"$ref": "#/$defs/nonNegativeInteger", "default": 1}, "maxProperties": {"$ref": "#/$defs/nonNegativeInteger"}, "minProperties": {"$ref": "#/$defs/nonNegativeIntegerDefault0"}, "required": {"$ref": "#/$defs/stringArray"}, "dependentRequired": {"type": "object", "additionalProperties": {"$ref": "#/$defs/stringArray"}}}, "$defs": {"nonNegativeInteger": {"type": "integer", "minimum": 0}, "nonNegativeIntegerDefault0": {"$ref": "#/$defs/nonNegativeInteger", "default": 0}, "simpleTypes": {"enum": ["array", "boolean", "integer", "null", "number", "object", "string"]}, "stringArray": {"type": "array", "items": {"type": "string"}, "uniqueItems": true, "default": []}}}, "https://json-schema.org/draft/2019-09/meta/content": {"$schema": "https://json-schema.org/draft/2019-09/schema", "$id": "https://json-schema.org/draft/2019-09/meta/content", "$vocabulary": {"https://json-schema.org/draft/2019-09/vocab/content": true}, "$recursiveAnchor": true, "title": "Content vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"contentMediaType": {"type": "string"}, "contentEncoding": {"type": "string"}, "contentSchema": {"$recursiveRef": 
"#"}}}, "https://json-schema.org/draft/2019-09/meta/applicator": {"$schema": "https://json-schema.org/draft/2019-09/schema", "$id": "https://json-schema.org/draft/2019-09/meta/applicator", "$vocabulary": {"https://json-schema.org/draft/2019-09/vocab/applicator": true}, "$recursiveAnchor": true, "title": "Applicator vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"additionalItems": {"$recursiveRef": "#"}, "unevaluatedItems": {"$recursiveRef": "#"}, "items": {"anyOf": [{"$recursiveRef": "#"}, {"$ref": "#/$defs/schemaArray"}]}, "contains": {"$recursiveRef": "#"}, "additionalProperties": {"$recursiveRef": "#"}, "unevaluatedProperties": {"$recursiveRef": "#"}, "properties": {"type": "object", "additionalProperties": {"$recursiveRef": "#"}, "default": {}}, "patternProperties": {"type": "object", "additionalProperties": {"$recursiveRef": "#"}, "propertyNames": {"format": "regex"}, "default": {}}, "dependentSchemas": {"type": "object", "additionalProperties": {"$recursiveRef": "#"}}, "propertyNames": {"$recursiveRef": "#"}, "if": {"$recursiveRef": "#"}, "then": {"$recursiveRef": "#"}, "else": {"$recursiveRef": "#"}, "allOf": {"$ref": "#/$defs/schemaArray"}, "anyOf": {"$ref": "#/$defs/schemaArray"}, "oneOf": {"$ref": "#/$defs/schemaArray"}, "not": {"$recursiveRef": "#"}}, "$defs": {"schemaArray": {"type": "array", "minItems": 1, "items": {"$recursiveRef": "#"}}}}, "https://json-schema.org/draft/2019-09/meta/meta-data": {"$schema": "https://json-schema.org/draft/2019-09/schema", "$id": "https://json-schema.org/draft/2019-09/meta/meta-data", "$vocabulary": {"https://json-schema.org/draft/2019-09/vocab/meta-data": true}, "$recursiveAnchor": true, "title": "Meta-data vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"title": {"type": "string"}, "description": {"type": "string"}, "default": true, "deprecated": {"type": "boolean", "default": false}, "readOnly": {"type": "boolean", "default": false}, "writeOnly": {"type": "boolean", "default": false}, "examples": {"type": "array", "items": true}}}, "https://json-schema.org/draft/2019-09/meta/core": {"$schema": "https://json-schema.org/draft/2019-09/schema", "$id": "https://json-schema.org/draft/2019-09/meta/core", "$vocabulary": {"https://json-schema.org/draft/2019-09/vocab/core": true}, "$recursiveAnchor": true, "title": "Core vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"$id": {"type": "string", "format": "uri-reference", "$comment": "Non-empty fragments not allowed.", "pattern": "^[^#]*#?$"}, "$schema": {"type": "string", "format": "uri"}, "$anchor": {"type": "string", "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$"}, "$ref": {"type": "string", "format": "uri-reference"}, "$recursiveRef": {"type": "string", "format": "uri-reference"}, "$recursiveAnchor": {"type": "boolean", "default": false}, "$vocabulary": {"type": "object", "propertyNames": {"type": "string", "format": "uri"}, "additionalProperties": {"type": "boolean"}}, "$comment": {"type": "string"}, "$defs": {"type": "object", "additionalProperties": {"$recursiveRef": "#"}, "default": {}}}}, "https://json-schema.org/draft/2019-09/meta/validation": {"$schema": "https://json-schema.org/draft/2019-09/schema", "$id": "https://json-schema.org/draft/2019-09/meta/validation", "$vocabulary": {"https://json-schema.org/draft/2019-09/vocab/validation": true}, "$recursiveAnchor": true, "title": "Validation vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"multipleOf": {"type": "number", "exclusiveMinimum": 0}, "maximum": 
{"type": "number"}, "exclusiveMaximum": {"type": "number"}, "minimum": {"type": "number"}, "exclusiveMinimum": {"type": "number"}, "maxLength": {"$ref": "#/$defs/nonNegativeInteger"}, "minLength": {"$ref": "#/$defs/nonNegativeIntegerDefault0"}, "pattern": {"type": "string", "format": "regex"}, "maxItems": {"$ref": "#/$defs/nonNegativeInteger"}, "minItems": {"$ref": "#/$defs/nonNegativeIntegerDefault0"}, "uniqueItems": {"type": "boolean", "default": false}, "maxContains": {"$ref": "#/$defs/nonNegativeInteger"}, "minContains": {"$ref": "#/$defs/nonNegativeInteger", "default": 1}, "maxProperties": {"$ref": "#/$defs/nonNegativeInteger"}, "minProperties": {"$ref": "#/$defs/nonNegativeIntegerDefault0"}, "required": {"$ref": "#/$defs/stringArray"}, "dependentRequired": {"type": "object", "additionalProperties": {"$ref": "#/$defs/stringArray"}}, "const": true, "enum": {"type": "array", "items": true}, "type": {"anyOf": [{"$ref": "#/$defs/simpleTypes"}, {"type": "array", "items": {"$ref": "#/$defs/simpleTypes"}, "minItems": 1, "uniqueItems": true}]}}, "$defs": {"nonNegativeInteger": {"type": "integer", "minimum": 0}, "nonNegativeIntegerDefault0": {"$ref": "#/$defs/nonNegativeInteger", "default": 0}, "simpleTypes": {"enum": ["array", "boolean", "integer", "null", "number", "object", "string"]}, "stringArray": {"type": "array", "items": {"type": "string"}, "uniqueItems": true, "default": []}}}, "https://json-schema.org/draft/2019-09/meta/hyper-schema": {"$schema": "https://json-schema.org/draft/2019-09/hyper-schema", "$id": "https://json-schema.org/draft/2019-09/meta/hyper-schema", "$vocabulary": {"https://json-schema.org/draft/2019-09/vocab/hyper-schema": true}, "$recursiveAnchor": true, "title": "JSON Hyper-Schema Vocabulary Schema", "type": ["object", "boolean"], "properties": {"base": {"type": "string", "format": "uri-template"}, "links": {"type": "array", "items": {"$ref": "https://json-schema.org/draft/2019-09/links"}}}, "links": [{"rel": "self", "href": "{+%24id}"}]}, "https://json-schema.org/draft/2019-09/meta/format": {"$schema": "https://json-schema.org/draft/2019-09/schema", "$id": "https://json-schema.org/draft/2019-09/meta/format", "$vocabulary": {"https://json-schema.org/draft/2019-09/vocab/format": true}, "$recursiveAnchor": true, "title": "Format vocabulary meta-schema", "type": ["object", "boolean"], "properties": {"format": {"type": "string"}}}} diff --git a/src/poetry/core/_vendor/jsonschema/validators.py b/src/poetry/core/_vendor/jsonschema/validators.py new file mode 100644 index 0000000..2e33c40 --- /dev/null +++ b/src/poetry/core/_vendor/jsonschema/validators.py @@ -0,0 +1,1156 @@ +""" +Creation and extension of validators, with implementations for existing drafts. 
+""" +from __future__ import annotations + +from collections import deque +from collections.abc import Sequence +from functools import lru_cache +from operator import methodcaller +from urllib.parse import unquote, urldefrag, urljoin, urlsplit +from urllib.request import urlopen +from warnings import warn +import contextlib +import json +import reprlib +import typing +import warnings + +from pyrsistent import m +import attr + +from jsonschema import ( + _format, + _legacy_validators, + _types, + _utils, + _validators, + exceptions, +) + +_UNSET = _utils.Unset() + +_VALIDATORS: dict[str, typing.Any] = {} +_META_SCHEMAS = _utils.URIDict() +_VOCABULARIES: list[tuple[str, typing.Any]] = [] + + +def __getattr__(name): + if name == "ErrorTree": + warnings.warn( + "Importing ErrorTree from jsonschema.validators is deprecated. " + "Instead import it from jsonschema.exceptions.", + DeprecationWarning, + stacklevel=2, + ) + from jsonschema.exceptions import ErrorTree + return ErrorTree + elif name == "validators": + warnings.warn( + "Accessing jsonschema.validators.validators is deprecated. " + "Use jsonschema.validators.validator_for with a given schema.", + DeprecationWarning, + stacklevel=2, + ) + return _VALIDATORS + elif name == "meta_schemas": + warnings.warn( + "Accessing jsonschema.validators.meta_schemas is deprecated. " + "Use jsonschema.validators.validator_for with a given schema.", + DeprecationWarning, + stacklevel=2, + ) + return _META_SCHEMAS + raise AttributeError(f"module {__name__} has no attribute {name}") + + +def validates(version): + """ + Register the decorated validator for a ``version`` of the specification. + + Registered validators and their meta schemas will be considered when + parsing :kw:`$schema` keywords' URIs. + + Arguments: + + version (str): + + An identifier to use as the version's name + + Returns: + + collections.abc.Callable: + + a class decorator to decorate the validator with the version + """ + + def _validates(cls): + _VALIDATORS[version] = cls + meta_schema_id = cls.ID_OF(cls.META_SCHEMA) + _META_SCHEMAS[meta_schema_id] = cls + return cls + return _validates + + +def _id_of(schema): + """ + Return the ID of a schema for recent JSON Schema drafts. + """ + if schema is True or schema is False: + return "" + return schema.get("$id", "") + + +def _store_schema_list(): + if not _VOCABULARIES: + _VOCABULARIES.extend(_utils.load_schema("vocabularies").items()) + return [ + (id, validator.META_SCHEMA) for id, validator in _META_SCHEMAS.items() + ] + _VOCABULARIES + + +def create( + meta_schema, + validators=(), + version=None, + type_checker=_types.draft202012_type_checker, + format_checker=_format.draft202012_format_checker, + id_of=_id_of, + applicable_validators=methodcaller("items"), +): + """ + Create a new validator class. + + Arguments: + + meta_schema (collections.abc.Mapping): + + the meta schema for the new validator class + + validators (collections.abc.Mapping): + + a mapping from names to callables, where each callable will + validate the schema property with the given name. + + Each callable should take 4 arguments: + + 1. a validator instance, + 2. the value of the property being validated within the + instance + 3. the instance + 4. the schema + + version (str): + + an identifier for the version that this validator class will + validate. If provided, the returned validator class will + have its ``__name__`` set to include the version, and also + will have `jsonschema.validators.validates` automatically + called for the given version. 
+ + type_checker (jsonschema.TypeChecker): + + a type checker, used when applying the :kw:`type` keyword. + + If unprovided, a `jsonschema.TypeChecker` will be created + with a set of default types typical of JSON Schema drafts. + + format_checker (jsonschema.FormatChecker): + + a format checker, used when applying the :kw:`format` keyword. + + If unprovided, a `jsonschema.FormatChecker` will be created + with a set of default formats typical of JSON Schema drafts. + + id_of (collections.abc.Callable): + + A function that given a schema, returns its ID. + + applicable_validators (collections.abc.Callable): + + A function that given a schema, returns the list of + applicable validators (validation keywords and callables) + which will be used to validate the instance. + + Returns: + + a new `jsonschema.protocols.Validator` class + """ + # preemptively don't shadow the `Validator.format_checker` local + format_checker_arg = format_checker + + @attr.s + class Validator: + + VALIDATORS = dict(validators) + META_SCHEMA = dict(meta_schema) + TYPE_CHECKER = type_checker + FORMAT_CHECKER = format_checker_arg + ID_OF = staticmethod(id_of) + + schema = attr.ib(repr=reprlib.repr) + resolver = attr.ib(default=None, repr=False) + format_checker = attr.ib(default=None) + + def __init_subclass__(cls): + warnings.warn( + ( + "Subclassing validator classes is not intended to " + "be part of their public API. A future version " + "will make doing so an error, as the behavior of " + "subclasses isn't guaranteed to stay the same " + "between releases of jsonschema. Instead, prefer " + "composition of validators, wrapping them in an object " + "owned entirely by the downstream library." + ), + DeprecationWarning, + stacklevel=2, + ) + + def __attrs_post_init__(self): + if self.resolver is None: + self.resolver = RefResolver.from_schema( + self.schema, + id_of=id_of, + ) + + @classmethod + def check_schema(cls, schema, format_checker=_UNSET): + Validator = validator_for(cls.META_SCHEMA, default=cls) + if format_checker is _UNSET: + format_checker = Validator.FORMAT_CHECKER + validator = Validator( + schema=cls.META_SCHEMA, + format_checker=format_checker, + ) + for error in validator.iter_errors(schema): + raise exceptions.SchemaError.create_from(error) + + def evolve(self, **changes): + # Essentially reproduces attr.evolve, but may involve instantiating + # a different class than this one. + cls = self.__class__ + + schema = changes.setdefault("schema", self.schema) + NewValidator = validator_for(schema, default=cls) + + for field in attr.fields(cls): + if not field.init: + continue + attr_name = field.name # To deal with private attributes. + init_name = attr_name if attr_name[0] != "_" else attr_name[1:] + if init_name not in changes: + changes[init_name] = getattr(self, attr_name) + + return NewValidator(**changes) + + def iter_errors(self, instance, _schema=None): + if _schema is not None: + warnings.warn( + ( + "Passing a schema to Validator.iter_errors " + "is deprecated and will be removed in a future " + "release. Call validator.evolve(schema=new_schema)." + "iter_errors(...) instead." 
+ ), + DeprecationWarning, + stacklevel=2, + ) + else: + _schema = self.schema + + if _schema is True: + return + elif _schema is False: + yield exceptions.ValidationError( + f"False schema does not allow {instance!r}", + validator=None, + validator_value=None, + instance=instance, + schema=_schema, + ) + return + + scope = id_of(_schema) + if scope: + self.resolver.push_scope(scope) + try: + for k, v in applicable_validators(_schema): + validator = self.VALIDATORS.get(k) + if validator is None: + continue + + errors = validator(self, v, instance, _schema) or () + for error in errors: + # set details if not already set by the called fn + error._set( + validator=k, + validator_value=v, + instance=instance, + schema=_schema, + type_checker=self.TYPE_CHECKER, + ) + if k not in {"if", "$ref"}: + error.schema_path.appendleft(k) + yield error + finally: + if scope: + self.resolver.pop_scope() + + def descend(self, instance, schema, path=None, schema_path=None): + for error in self.evolve(schema=schema).iter_errors(instance): + if path is not None: + error.path.appendleft(path) + if schema_path is not None: + error.schema_path.appendleft(schema_path) + yield error + + def validate(self, *args, **kwargs): + for error in self.iter_errors(*args, **kwargs): + raise error + + def is_type(self, instance, type): + try: + return self.TYPE_CHECKER.is_type(instance, type) + except exceptions.UndefinedTypeCheck: + raise exceptions.UnknownType(type, instance, self.schema) + + def is_valid(self, instance, _schema=None): + if _schema is not None: + warnings.warn( + ( + "Passing a schema to Validator.is_valid is deprecated " + "and will be removed in a future release. Call " + "validator.evolve(schema=new_schema).is_valid(...) " + "instead." + ), + DeprecationWarning, + stacklevel=2, + ) + self = self.evolve(schema=_schema) + + error = next(self.iter_errors(instance), None) + return error is None + + if version is not None: + safe = version.title().replace(" ", "").replace("-", "") + Validator.__name__ = Validator.__qualname__ = f"{safe}Validator" + Validator = validates(version)(Validator) + + return Validator + + +def extend( + validator, + validators=(), + version=None, + type_checker=None, + format_checker=None, +): + """ + Create a new validator class by extending an existing one. + + Arguments: + + validator (jsonschema.protocols.Validator): + + an existing validator class + + validators (collections.abc.Mapping): + + a mapping of new validator callables to extend with, whose + structure is as in `create`. + + .. note:: + + Any validator callables with the same name as an + existing one will (silently) replace the old validator + callable entirely, effectively overriding any validation + done in the "parent" validator class. + + If you wish to instead extend the behavior of a parent's + validator callable, delegate and call it directly in + the new validator function by retrieving it using + ``OldValidator.VALIDATORS["validation_keyword_name"]``. + + version (str): + + a version for the new validator class + + type_checker (jsonschema.TypeChecker): + + a type checker, used when applying the :kw:`type` keyword. + + If unprovided, the type checker of the extended + `jsonschema.protocols.Validator` will be carried along. + + format_checker (jsonschema.FormatChecker): + + a format checker, used when applying the :kw:`format` keyword. + + If unprovided, the format checker of the extended + `jsonschema.protocols.Validator` will be carried along. 
+ + Returns: + + a new `jsonschema.protocols.Validator` class extending the one + provided + + .. note:: Meta Schemas + + The new validator class will have its parent's meta schema. + + If you wish to change or extend the meta schema in the new + validator class, modify ``META_SCHEMA`` directly on the returned + class. Note that no implicit copying is done, so a copy should + likely be made before modifying it, in order to not affect the + old validator. + """ + + all_validators = dict(validator.VALIDATORS) + all_validators.update(validators) + + if type_checker is None: + type_checker = validator.TYPE_CHECKER + if format_checker is None: + format_checker = validator.FORMAT_CHECKER + return create( + meta_schema=validator.META_SCHEMA, + validators=all_validators, + version=version, + type_checker=type_checker, + format_checker=format_checker, + id_of=validator.ID_OF, + ) + + +Draft3Validator = create( + meta_schema=_utils.load_schema("draft3"), + validators={ + "$ref": _validators.ref, + "additionalItems": _validators.additionalItems, + "additionalProperties": _validators.additionalProperties, + "dependencies": _legacy_validators.dependencies_draft3, + "disallow": _legacy_validators.disallow_draft3, + "divisibleBy": _validators.multipleOf, + "enum": _validators.enum, + "extends": _legacy_validators.extends_draft3, + "format": _validators.format, + "items": _legacy_validators.items_draft3_draft4, + "maxItems": _validators.maxItems, + "maxLength": _validators.maxLength, + "maximum": _legacy_validators.maximum_draft3_draft4, + "minItems": _validators.minItems, + "minLength": _validators.minLength, + "minimum": _legacy_validators.minimum_draft3_draft4, + "pattern": _validators.pattern, + "patternProperties": _validators.patternProperties, + "properties": _legacy_validators.properties_draft3, + "type": _legacy_validators.type_draft3, + "uniqueItems": _validators.uniqueItems, + }, + type_checker=_types.draft3_type_checker, + format_checker=_format.draft3_format_checker, + version="draft3", + id_of=_legacy_validators.id_of_ignore_ref(property="id"), + applicable_validators=_legacy_validators.ignore_ref_siblings, +) + +Draft4Validator = create( + meta_schema=_utils.load_schema("draft4"), + validators={ + "$ref": _validators.ref, + "additionalItems": _validators.additionalItems, + "additionalProperties": _validators.additionalProperties, + "allOf": _validators.allOf, + "anyOf": _validators.anyOf, + "dependencies": _legacy_validators.dependencies_draft4_draft6_draft7, + "enum": _validators.enum, + "format": _validators.format, + "items": _legacy_validators.items_draft3_draft4, + "maxItems": _validators.maxItems, + "maxLength": _validators.maxLength, + "maxProperties": _validators.maxProperties, + "maximum": _legacy_validators.maximum_draft3_draft4, + "minItems": _validators.minItems, + "minLength": _validators.minLength, + "minProperties": _validators.minProperties, + "minimum": _legacy_validators.minimum_draft3_draft4, + "multipleOf": _validators.multipleOf, + "not": _validators.not_, + "oneOf": _validators.oneOf, + "pattern": _validators.pattern, + "patternProperties": _validators.patternProperties, + "properties": _validators.properties, + "required": _validators.required, + "type": _validators.type, + "uniqueItems": _validators.uniqueItems, + }, + type_checker=_types.draft4_type_checker, + format_checker=_format.draft4_format_checker, + version="draft4", + id_of=_legacy_validators.id_of_ignore_ref(property="id"), + applicable_validators=_legacy_validators.ignore_ref_siblings, +) + 
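# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the vendored upstream file): how a
# validator class produced by create() above -- e.g. the Draft4Validator
# just defined -- is typically used. The import path, schema, and sample
# instances below are assumptions for illustration only; in this vendored
# copy the module actually lives under poetry.core._vendor.jsonschema.
from jsonschema.validators import Draft4Validator

schema = {
    "type": "object",
    "required": ["name"],
    "properties": {"name": {"type": "string"}},
}

Draft4Validator.check_schema(schema)   # raises SchemaError if the schema itself is invalid
validator = Draft4Validator(schema)

validator.is_valid({"name": "poetry-core"})   # True
for error in validator.iter_errors({}):       # "name" is required but missing here
    print(error.message)
# ---------------------------------------------------------------------------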
+Draft6Validator = create( + meta_schema=_utils.load_schema("draft6"), + validators={ + "$ref": _validators.ref, + "additionalItems": _validators.additionalItems, + "additionalProperties": _validators.additionalProperties, + "allOf": _validators.allOf, + "anyOf": _validators.anyOf, + "const": _validators.const, + "contains": _legacy_validators.contains_draft6_draft7, + "dependencies": _legacy_validators.dependencies_draft4_draft6_draft7, + "enum": _validators.enum, + "exclusiveMaximum": _validators.exclusiveMaximum, + "exclusiveMinimum": _validators.exclusiveMinimum, + "format": _validators.format, + "items": _legacy_validators.items_draft6_draft7_draft201909, + "maxItems": _validators.maxItems, + "maxLength": _validators.maxLength, + "maxProperties": _validators.maxProperties, + "maximum": _validators.maximum, + "minItems": _validators.minItems, + "minLength": _validators.minLength, + "minProperties": _validators.minProperties, + "minimum": _validators.minimum, + "multipleOf": _validators.multipleOf, + "not": _validators.not_, + "oneOf": _validators.oneOf, + "pattern": _validators.pattern, + "patternProperties": _validators.patternProperties, + "properties": _validators.properties, + "propertyNames": _validators.propertyNames, + "required": _validators.required, + "type": _validators.type, + "uniqueItems": _validators.uniqueItems, + }, + type_checker=_types.draft6_type_checker, + format_checker=_format.draft6_format_checker, + version="draft6", + id_of=_legacy_validators.id_of_ignore_ref(), + applicable_validators=_legacy_validators.ignore_ref_siblings, +) + +Draft7Validator = create( + meta_schema=_utils.load_schema("draft7"), + validators={ + "$ref": _validators.ref, + "additionalItems": _validators.additionalItems, + "additionalProperties": _validators.additionalProperties, + "allOf": _validators.allOf, + "anyOf": _validators.anyOf, + "const": _validators.const, + "contains": _legacy_validators.contains_draft6_draft7, + "dependencies": _legacy_validators.dependencies_draft4_draft6_draft7, + "enum": _validators.enum, + "exclusiveMaximum": _validators.exclusiveMaximum, + "exclusiveMinimum": _validators.exclusiveMinimum, + "format": _validators.format, + "if": _validators.if_, + "items": _legacy_validators.items_draft6_draft7_draft201909, + "maxItems": _validators.maxItems, + "maxLength": _validators.maxLength, + "maxProperties": _validators.maxProperties, + "maximum": _validators.maximum, + "minItems": _validators.minItems, + "minLength": _validators.minLength, + "minProperties": _validators.minProperties, + "minimum": _validators.minimum, + "multipleOf": _validators.multipleOf, + "not": _validators.not_, + "oneOf": _validators.oneOf, + "pattern": _validators.pattern, + "patternProperties": _validators.patternProperties, + "properties": _validators.properties, + "propertyNames": _validators.propertyNames, + "required": _validators.required, + "type": _validators.type, + "uniqueItems": _validators.uniqueItems, + }, + type_checker=_types.draft7_type_checker, + format_checker=_format.draft7_format_checker, + version="draft7", + id_of=_legacy_validators.id_of_ignore_ref(), + applicable_validators=_legacy_validators.ignore_ref_siblings, +) + +Draft201909Validator = create( + meta_schema=_utils.load_schema("draft2019-09"), + validators={ + "$recursiveRef": _legacy_validators.recursiveRef, + "$ref": _validators.ref, + "additionalItems": _validators.additionalItems, + "additionalProperties": _validators.additionalProperties, + "allOf": _validators.allOf, + "anyOf": _validators.anyOf, + "const": 
_validators.const, + "contains": _validators.contains, + "dependentRequired": _validators.dependentRequired, + "dependentSchemas": _validators.dependentSchemas, + "enum": _validators.enum, + "exclusiveMaximum": _validators.exclusiveMaximum, + "exclusiveMinimum": _validators.exclusiveMinimum, + "format": _validators.format, + "if": _validators.if_, + "items": _legacy_validators.items_draft6_draft7_draft201909, + "maxItems": _validators.maxItems, + "maxLength": _validators.maxLength, + "maxProperties": _validators.maxProperties, + "maximum": _validators.maximum, + "minItems": _validators.minItems, + "minLength": _validators.minLength, + "minProperties": _validators.minProperties, + "minimum": _validators.minimum, + "multipleOf": _validators.multipleOf, + "not": _validators.not_, + "oneOf": _validators.oneOf, + "pattern": _validators.pattern, + "patternProperties": _validators.patternProperties, + "properties": _validators.properties, + "propertyNames": _validators.propertyNames, + "required": _validators.required, + "type": _validators.type, + "unevaluatedItems": _legacy_validators.unevaluatedItems_draft2019, + "unevaluatedProperties": _validators.unevaluatedProperties, + "uniqueItems": _validators.uniqueItems, + }, + type_checker=_types.draft201909_type_checker, + format_checker=_format.draft201909_format_checker, + version="draft2019-09", +) + +Draft202012Validator = create( + meta_schema=_utils.load_schema("draft2020-12"), + validators={ + "$dynamicRef": _validators.dynamicRef, + "$ref": _validators.ref, + "additionalItems": _validators.additionalItems, + "additionalProperties": _validators.additionalProperties, + "allOf": _validators.allOf, + "anyOf": _validators.anyOf, + "const": _validators.const, + "contains": _validators.contains, + "dependentRequired": _validators.dependentRequired, + "dependentSchemas": _validators.dependentSchemas, + "enum": _validators.enum, + "exclusiveMaximum": _validators.exclusiveMaximum, + "exclusiveMinimum": _validators.exclusiveMinimum, + "format": _validators.format, + "if": _validators.if_, + "items": _validators.items, + "maxItems": _validators.maxItems, + "maxLength": _validators.maxLength, + "maxProperties": _validators.maxProperties, + "maximum": _validators.maximum, + "minItems": _validators.minItems, + "minLength": _validators.minLength, + "minProperties": _validators.minProperties, + "minimum": _validators.minimum, + "multipleOf": _validators.multipleOf, + "not": _validators.not_, + "oneOf": _validators.oneOf, + "pattern": _validators.pattern, + "patternProperties": _validators.patternProperties, + "prefixItems": _validators.prefixItems, + "properties": _validators.properties, + "propertyNames": _validators.propertyNames, + "required": _validators.required, + "type": _validators.type, + "unevaluatedItems": _validators.unevaluatedItems, + "unevaluatedProperties": _validators.unevaluatedProperties, + "uniqueItems": _validators.uniqueItems, + }, + type_checker=_types.draft202012_type_checker, + format_checker=_format.draft202012_format_checker, + version="draft2020-12", +) + +_LATEST_VERSION = Draft202012Validator + + +class RefResolver: + """ + Resolve JSON References. 
+ + Arguments: + + base_uri (str): + + The URI of the referring document + + referrer: + + The actual referring document + + store (dict): + + A mapping from URIs to documents to cache + + cache_remote (bool): + + Whether remote refs should be cached after first resolution + + handlers (dict): + + A mapping from URI schemes to functions that should be used + to retrieve them + + urljoin_cache (:func:`functools.lru_cache`): + + A cache that will be used for caching the results of joining + the resolution scope to subscopes. + + remote_cache (:func:`functools.lru_cache`): + + A cache that will be used for caching the results of + resolved remote URLs. + + Attributes: + + cache_remote (bool): + + Whether remote refs should be cached after first resolution + """ + + def __init__( + self, + base_uri, + referrer, + store=m(), + cache_remote=True, + handlers=(), + urljoin_cache=None, + remote_cache=None, + ): + if urljoin_cache is None: + urljoin_cache = lru_cache(1024)(urljoin) + if remote_cache is None: + remote_cache = lru_cache(1024)(self.resolve_from_url) + + self.referrer = referrer + self.cache_remote = cache_remote + self.handlers = dict(handlers) + + self._scopes_stack = [base_uri] + + self.store = _utils.URIDict(_store_schema_list()) + self.store.update(store) + self.store.update( + (schema["$id"], schema) + for schema in store.values() if "$id" in schema + ) + self.store[base_uri] = referrer + + self._urljoin_cache = urljoin_cache + self._remote_cache = remote_cache + + @classmethod + def from_schema(cls, schema, id_of=_id_of, *args, **kwargs): + """ + Construct a resolver from a JSON schema object. + + Arguments: + + schema: + + the referring schema + + Returns: + + `RefResolver` + """ + + return cls(base_uri=id_of(schema), referrer=schema, *args, **kwargs) # noqa: B026, E501 + + def push_scope(self, scope): + """ + Enter a given sub-scope. + + Treats further dereferences as being performed underneath the + given scope. + """ + self._scopes_stack.append( + self._urljoin_cache(self.resolution_scope, scope), + ) + + def pop_scope(self): + """ + Exit the most recent entered scope. + + Treats further dereferences as being performed underneath the + original scope. + + Don't call this method more times than `push_scope` has been + called. + """ + try: + self._scopes_stack.pop() + except IndexError: + raise exceptions.RefResolutionError( + "Failed to pop the scope from an empty stack. " + "`pop_scope()` should only be called once for every " + "`push_scope()`", + ) + + @property + def resolution_scope(self): + """ + Retrieve the current resolution scope. + """ + return self._scopes_stack[-1] + + @property + def base_uri(self): + """ + Retrieve the current base URI, not including any fragment. + """ + uri, _ = urldefrag(self.resolution_scope) + return uri + + @contextlib.contextmanager + def in_scope(self, scope): + """ + Temporarily enter the given scope for the duration of the context. + + .. deprecated:: v4.0.0 + """ + warnings.warn( + "jsonschema.RefResolver.in_scope is deprecated and will be " + "removed in a future release.", + DeprecationWarning, + stacklevel=3, + ) + self.push_scope(scope) + try: + yield + finally: + self.pop_scope() + + @contextlib.contextmanager + def resolving(self, ref): + """ + Resolve the given ``ref`` and enter its resolution scope. + + Exits the scope on exit of this context manager. 
+ + Arguments: + + ref (str): + + The reference to resolve + """ + + url, resolved = self.resolve(ref) + self.push_scope(url) + try: + yield resolved + finally: + self.pop_scope() + + def _find_in_referrer(self, key): + return self._get_subschemas_cache()[key] + + @lru_cache() # noqa: B019 + def _get_subschemas_cache(self): + cache = {key: [] for key in _SUBSCHEMAS_KEYWORDS} + for keyword, subschema in _search_schema( + self.referrer, _match_subschema_keywords, + ): + cache[keyword].append(subschema) + return cache + + @lru_cache() # noqa: B019 + def _find_in_subschemas(self, url): + subschemas = self._get_subschemas_cache()["$id"] + if not subschemas: + return None + uri, fragment = urldefrag(url) + for subschema in subschemas: + target_uri = self._urljoin_cache( + self.resolution_scope, subschema["$id"], + ) + if target_uri.rstrip("/") == uri.rstrip("/"): + if fragment: + subschema = self.resolve_fragment(subschema, fragment) + self.store[url] = subschema + return url, subschema + return None + + def resolve(self, ref): + """ + Resolve the given reference. + """ + url = self._urljoin_cache(self.resolution_scope, ref).rstrip("/") + + match = self._find_in_subschemas(url) + if match is not None: + return match + + return url, self._remote_cache(url) + + def resolve_from_url(self, url): + """ + Resolve the given URL. + """ + url, fragment = urldefrag(url) + if not url: + url = self.base_uri + + try: + document = self.store[url] + except KeyError: + try: + document = self.resolve_remote(url) + except Exception as exc: + raise exceptions.RefResolutionError(exc) + + return self.resolve_fragment(document, fragment) + + def resolve_fragment(self, document, fragment): + """ + Resolve a ``fragment`` within the referenced ``document``. + + Arguments: + + document: + + The referent document + + fragment (str): + + a URI fragment to resolve within it + """ + + fragment = fragment.lstrip("/") + + if not fragment: + return document + + if document is self.referrer: + find = self._find_in_referrer + else: + + def find(key): + yield from _search_schema(document, _match_keyword(key)) + + for keyword in ["$anchor", "$dynamicAnchor"]: + for subschema in find(keyword): + if fragment == subschema[keyword]: + return subschema + for keyword in ["id", "$id"]: + for subschema in find(keyword): + if "#" + fragment == subschema[keyword]: + return subschema + + # Resolve via path + parts = unquote(fragment).split("/") if fragment else [] + for part in parts: + part = part.replace("~1", "/").replace("~0", "~") + + if isinstance(document, Sequence): + # Array indexes should be turned into integers + try: + part = int(part) + except ValueError: + pass + try: + document = document[part] + except (TypeError, LookupError): + raise exceptions.RefResolutionError( + f"Unresolvable JSON pointer: {fragment!r}", + ) + + return document + + def resolve_remote(self, uri): + """ + Resolve a remote ``uri``. + + If called directly, does not check the store first, but after + retrieving the document at the specified URI it will be saved in + the store if :attr:`cache_remote` is True. + + .. note:: + + If the requests_ library is present, ``jsonschema`` will use it to + request the remote ``uri``, so that the correct encoding is + detected and used. + + If it isn't, or if the scheme of the ``uri`` is not ``http`` or + ``https``, UTF-8 is assumed. + + Arguments: + + uri (str): + + The URI to resolve + + Returns: + + The retrieved document + + .. 
_requests: https://pypi.org/project/requests/ + """ + try: + import requests + except ImportError: + requests = None + + scheme = urlsplit(uri).scheme + + if scheme in self.handlers: + result = self.handlers[scheme](uri) + elif scheme in ["http", "https"] and requests: + # Requests has support for detecting the correct encoding of + # json over http + result = requests.get(uri).json() + else: + # Otherwise, pass off to urllib and assume utf-8 + with urlopen(uri) as url: + result = json.loads(url.read().decode("utf-8")) + + if self.cache_remote: + self.store[uri] = result + return result + + +_SUBSCHEMAS_KEYWORDS = ("$id", "id", "$anchor", "$dynamicAnchor") + + +def _match_keyword(keyword): + + def matcher(value): + if keyword in value: + yield value + + return matcher + + +def _match_subschema_keywords(value): + for keyword in _SUBSCHEMAS_KEYWORDS: + if keyword in value: + yield keyword, value + + +def _search_schema(schema, matcher): + """Breadth-first search routine.""" + values = deque([schema]) + while values: + value = values.pop() + if not isinstance(value, dict): + continue + yield from matcher(value) + values.extendleft(value.values()) + + +def validate(instance, schema, cls=None, *args, **kwargs): + """ + Validate an instance under the given schema. + + >>> validate([2, 3, 4], {"maxItems": 2}) + Traceback (most recent call last): + ... + ValidationError: [2, 3, 4] is too long + + :func:`validate` will first verify that the provided schema is + itself valid, since not doing so can lead to less obvious error + messages and fail in less obvious or consistent ways. + + If you know you have a valid schema already, especially + if you intend to validate multiple instances with + the same schema, you likely would prefer using the + `jsonschema.protocols.Validator.validate` method directly on a + specific validator (e.g. ``Draft20212Validator.validate``). + + + Arguments: + + instance: + + The instance to validate + + schema: + + The schema to validate with + + cls (jsonschema.protocols.Validator): + + The class that will be used to validate the instance. + + If the ``cls`` argument is not provided, two things will happen + in accordance with the specification. First, if the schema has a + :kw:`$schema` keyword containing a known meta-schema [#]_ then the + proper validator will be used. The specification recommends that + all schemas contain :kw:`$schema` properties for this reason. If no + :kw:`$schema` property is found, the default validator class is the + latest released draft. + + Any other provided positional and keyword arguments will be passed + on when instantiating the ``cls``. + + Raises: + + `jsonschema.exceptions.ValidationError`: + + if the instance is invalid + + `jsonschema.exceptions.SchemaError`: + + if the schema itself is invalid + + .. rubric:: Footnotes + .. [#] known by a validator registered with + `jsonschema.validators.validates` + """ + if cls is None: + cls = validator_for(schema) + + cls.check_schema(schema) + validator = cls(schema, *args, **kwargs) + error = exceptions.best_match(validator.iter_errors(instance)) + if error is not None: + raise error + + +def validator_for(schema, default=_UNSET): + """ + Retrieve the validator class appropriate for validating the given schema. + + Uses the :kw:`$schema` keyword that should be present in the given + schema to look up the appropriate validator class. 
+ + Arguments: + + schema (collections.abc.Mapping or bool): + + the schema to look at + + default: + + the default to return if the appropriate validator class + cannot be determined. + + If unprovided, the default is to return the latest supported + draft. + """ + + DefaultValidator = _LATEST_VERSION if default is _UNSET else default + + if schema is True or schema is False or "$schema" not in schema: + return DefaultValidator + if schema["$schema"] not in _META_SCHEMAS: + if default is _UNSET: + warn( + ( + "The metaschema specified by $schema was not found. " + "Using the latest draft to validate, but this will raise " + "an error in the future." + ), + DeprecationWarning, + stacklevel=2, + ) + return _META_SCHEMAS.get(schema["$schema"], DefaultValidator) diff --git a/src/poetry/core/_vendor/lark/LICENSE b/src/poetry/core/_vendor/lark/LICENSE new file mode 100644 index 0000000..9201da2 --- /dev/null +++ b/src/poetry/core/_vendor/lark/LICENSE @@ -0,0 +1,18 @@ +Copyright © 2017 Erez Shinan + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/poetry/core/_vendor/lark/__init__.py b/src/poetry/core/_vendor/lark/__init__.py new file mode 100644 index 0000000..d8d0c88 --- /dev/null +++ b/src/poetry/core/_vendor/lark/__init__.py @@ -0,0 +1,38 @@ +from .exceptions import ( + GrammarError, + LarkError, + LexError, + ParseError, + UnexpectedCharacters, + UnexpectedEOF, + UnexpectedInput, + UnexpectedToken, +) +from .lark import Lark +from .lexer import Token +from .tree import ParseTree, Tree +from .utils import logger +from .visitors import Discard, Transformer, Transformer_NonRecursive, Visitor, v_args + +__version__: str = "1.1.4" + +__all__ = ( + "GrammarError", + "LarkError", + "LexError", + "ParseError", + "UnexpectedCharacters", + "UnexpectedEOF", + "UnexpectedInput", + "UnexpectedToken", + "Lark", + "Token", + "ParseTree", + "Tree", + "logger", + "Discard", + "Transformer", + "Transformer_NonRecursive", + "Visitor", + "v_args", +) diff --git a/src/poetry/core/_vendor/lark/__pyinstaller/__init__.py b/src/poetry/core/_vendor/lark/__pyinstaller/__init__.py new file mode 100644 index 0000000..9da62a3 --- /dev/null +++ b/src/poetry/core/_vendor/lark/__pyinstaller/__init__.py @@ -0,0 +1,6 @@ +# For usage of lark with PyInstaller. 
See https://pyinstaller-sample-hook.readthedocs.io/en/latest/index.html + +import os + +def get_hook_dirs(): + return [os.path.dirname(__file__)] diff --git a/src/poetry/core/_vendor/lark/__pyinstaller/hook-lark.py b/src/poetry/core/_vendor/lark/__pyinstaller/hook-lark.py new file mode 100644 index 0000000..cf3d8e3 --- /dev/null +++ b/src/poetry/core/_vendor/lark/__pyinstaller/hook-lark.py @@ -0,0 +1,14 @@ +#----------------------------------------------------------------------------- +# Copyright (c) 2017-2020, PyInstaller Development Team. +# +# Distributed under the terms of the GNU General Public License (version 2 +# or later) with exception for distributing the bootloader. +# +# The full license is in the file COPYING.txt, distributed with this software. +# +# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception) +#----------------------------------------------------------------------------- + +from PyInstaller.utils.hooks import collect_data_files + +datas = collect_data_files('lark') diff --git a/src/poetry/core/_vendor/lark/ast_utils.py b/src/poetry/core/_vendor/lark/ast_utils.py new file mode 100644 index 0000000..0ceee98 --- /dev/null +++ b/src/poetry/core/_vendor/lark/ast_utils.py @@ -0,0 +1,59 @@ +""" + Module of utilities for transforming a lark.Tree into a custom Abstract Syntax Tree +""" + +import inspect, re +import types +from typing import Optional, Callable + +from lark import Transformer, v_args + +class Ast: + """Abstract class + + Subclasses will be collected by `create_transformer()` + """ + pass + +class AsList: + """Abstract class + + Subclasses will be instantiated with the parse results as a single list, instead of as arguments. + """ + +class WithMeta: + """Abstract class + + Subclasses will be instantiated with the Meta instance of the tree. (see ``v_args`` for more detail) + """ + pass + +def camel_to_snake(name): + return re.sub(r'(?<!^)(?=[A-Z])', '_', name).lower() + +def create_transformer(ast_module: types.ModuleType, + transformer: Optional[Transformer]=None, + decorator_factory: Callable=v_args) -> Transformer: + """Collects `Ast` subclasses from the given module, and creates a Lark transformer that builds the AST. + + For each class, we create a corresponding rule in the transformer, with a matching name. + CamelCase names will be converted into snake_case. Example: "CodeBlock" -> "code_block". + + Classes starting with an underscore (`_`) will be skipped. + + Parameters: + ast_module: A Python module containing all the subclasses of ``ast_utils.Ast`` + transformer (Optional[Transformer]): An initial transformer. Its attributes may be overwritten. + decorator_factory (Callable): An optional callable accepting two booleans, inline, and meta, + and returning a decorator for the methods of ``transformer``. (default: ``v_args``).
+ """ + t = transformer or Transformer() + + for name, obj in inspect.getmembers(ast_module): + if not name.startswith('_') and inspect.isclass(obj): + if issubclass(obj, Ast): + wrapper = decorator_factory(inline=not issubclass(obj, AsList), meta=issubclass(obj, WithMeta)) + obj = wrapper(obj).__get__(t) + setattr(t, camel_to_snake(name), obj) + + return t diff --git a/src/poetry/core/_vendor/lark/common.py b/src/poetry/core/_vendor/lark/common.py new file mode 100644 index 0000000..d716add --- /dev/null +++ b/src/poetry/core/_vendor/lark/common.py @@ -0,0 +1,82 @@ +from copy import deepcopy +import sys +from types import ModuleType +from typing import Callable, Collection, Dict, Optional, TYPE_CHECKING + +if TYPE_CHECKING: + from .lark import PostLex + from .lexer import Lexer + from typing import Union, Type + if sys.version_info >= (3, 8): + from typing import Literal + else: + from typing_extensions import Literal + if sys.version_info >= (3, 10): + from typing import TypeAlias + else: + from typing_extensions import TypeAlias + +from .utils import Serialize +from .lexer import TerminalDef, Token + +###{standalone + +_ParserArgType: 'TypeAlias' = 'Literal["earley", "lalr", "cyk", "auto"]' +_LexerArgType: 'TypeAlias' = 'Union[Literal["auto", "basic", "contextual", "dynamic", "dynamic_complete"], Type[Lexer]]' +_Callback = Callable[[Token], Token] + +class LexerConf(Serialize): + __serialize_fields__ = 'terminals', 'ignore', 'g_regex_flags', 'use_bytes', 'lexer_type' + __serialize_namespace__ = TerminalDef, + + terminals: Collection[TerminalDef] + re_module: ModuleType + ignore: Collection[str] + postlex: 'Optional[PostLex]' + callbacks: Dict[str, _Callback] + g_regex_flags: int + skip_validation: bool + use_bytes: bool + lexer_type: Optional[_LexerArgType] + + def __init__(self, terminals: Collection[TerminalDef], re_module: ModuleType, ignore: Collection[str]=(), postlex: 'Optional[PostLex]'=None, callbacks: Optional[Dict[str, _Callback]]=None, g_regex_flags: int=0, skip_validation: bool=False, use_bytes: bool=False): + self.terminals = terminals + self.terminals_by_name = {t.name: t for t in self.terminals} + assert len(self.terminals) == len(self.terminals_by_name) + self.ignore = ignore + self.postlex = postlex + self.callbacks = callbacks or {} + self.g_regex_flags = g_regex_flags + self.re_module = re_module + self.skip_validation = skip_validation + self.use_bytes = use_bytes + self.lexer_type = None + + def _deserialize(self): + self.terminals_by_name = {t.name: t for t in self.terminals} + + def __deepcopy__(self, memo=None): + return type(self)( + deepcopy(self.terminals, memo), + self.re_module, + deepcopy(self.ignore, memo), + deepcopy(self.postlex, memo), + deepcopy(self.callbacks, memo), + deepcopy(self.g_regex_flags, memo), + deepcopy(self.skip_validation, memo), + deepcopy(self.use_bytes, memo), + ) + + +class ParserConf(Serialize): + __serialize_fields__ = 'rules', 'start', 'parser_type' + + def __init__(self, rules, callbacks, start): + assert isinstance(start, list) + self.rules = rules + self.callbacks = callbacks + self.start = start + + self.parser_type = None + +###} diff --git a/src/poetry/core/_vendor/lark/exceptions.py b/src/poetry/core/_vendor/lark/exceptions.py new file mode 100644 index 0000000..35b986a --- /dev/null +++ b/src/poetry/core/_vendor/lark/exceptions.py @@ -0,0 +1,292 @@ +from .utils import logger, NO_VALUE +from typing import Mapping, Iterable, Callable, Union, TypeVar, Tuple, Any, List, Set, Optional, Collection, TYPE_CHECKING + +if 
TYPE_CHECKING: + from .lexer import Token + from .parsers.lalr_interactive_parser import InteractiveParser + from .tree import Tree + +###{standalone + +class LarkError(Exception): + pass + + +class ConfigurationError(LarkError, ValueError): + pass + + +def assert_config(value, options: Collection, msg='Got %r, expected one of %s'): + if value not in options: + raise ConfigurationError(msg % (value, options)) + + +class GrammarError(LarkError): + pass + + +class ParseError(LarkError): + pass + + +class LexError(LarkError): + pass + +T = TypeVar('T') + +class UnexpectedInput(LarkError): + """UnexpectedInput Error. + + Used as a base class for the following exceptions: + + - ``UnexpectedCharacters``: The lexer encountered an unexpected string + - ``UnexpectedToken``: The parser received an unexpected token + - ``UnexpectedEOF``: The parser expected a token, but the input ended + + After catching one of these exceptions, you may call the following helper methods to create a nicer error message. + """ + line: int + column: int + pos_in_stream = None + state: Any + _terminals_by_name = None + + def get_context(self, text: str, span: int=40) -> str: + """Returns a pretty string pinpointing the error in the text, + with span amount of context characters around it. + + Note: + The parser doesn't hold a copy of the text it has to parse, + so you have to provide it again + """ + assert self.pos_in_stream is not None, self + pos = self.pos_in_stream + start = max(pos - span, 0) + end = pos + span + if not isinstance(text, bytes): + before = text[start:pos].rsplit('\n', 1)[-1] + after = text[pos:end].split('\n', 1)[0] + return before + after + '\n' + ' ' * len(before.expandtabs()) + '^\n' + else: + before = text[start:pos].rsplit(b'\n', 1)[-1] + after = text[pos:end].split(b'\n', 1)[0] + return (before + after + b'\n' + b' ' * len(before.expandtabs()) + b'^\n').decode("ascii", "backslashreplace") + + def match_examples(self, parse_fn: 'Callable[[str], Tree]', + examples: Union[Mapping[T, Iterable[str]], Iterable[Tuple[T, Iterable[str]]]], + token_type_match_fallback: bool=False, + use_accepts: bool=True + ) -> Optional[T]: + """Allows you to detect what's wrong in the input text by matching + against example errors. + + Given a parser instance and a dictionary mapping some label with + some malformed syntax examples, it'll return the label for the + example that bests matches the current error. The function will + iterate the dictionary until it finds a matching error, and + return the corresponding value. + + For an example usage, see `examples/error_reporting_lalr.py` + + Parameters: + parse_fn: parse function (usually ``lark_instance.parse``) + examples: dictionary of ``{'example_string': value}``. + use_accepts: Recommended to keep this as ``use_accepts=True``. 
+ """ + assert self.state is not None, "Not supported for this exception" + + if isinstance(examples, Mapping): + examples = examples.items() + + candidate = (None, False) + for i, (label, example) in enumerate(examples): + assert not isinstance(example, str), "Expecting a list" + + for j, malformed in enumerate(example): + try: + parse_fn(malformed) + except UnexpectedInput as ut: + if ut.state == self.state: + if ( + use_accepts + and isinstance(self, UnexpectedToken) + and isinstance(ut, UnexpectedToken) + and ut.accepts != self.accepts + ): + logger.debug("Different accepts with same state[%d]: %s != %s at example [%s][%s]" % + (self.state, self.accepts, ut.accepts, i, j)) + continue + if ( + isinstance(self, (UnexpectedToken, UnexpectedEOF)) + and isinstance(ut, (UnexpectedToken, UnexpectedEOF)) + ): + if ut.token == self.token: # Try exact match first + logger.debug("Exact Match at example [%s][%s]" % (i, j)) + return label + + if token_type_match_fallback: + # Fallback to token types match + if (ut.token.type == self.token.type) and not candidate[-1]: + logger.debug("Token Type Fallback at example [%s][%s]" % (i, j)) + candidate = label, True + + if candidate[0] is None: + logger.debug("Same State match at example [%s][%s]" % (i, j)) + candidate = label, False + + return candidate[0] + + def _format_expected(self, expected): + if self._terminals_by_name: + d = self._terminals_by_name + expected = [d[t_name].user_repr() if t_name in d else t_name for t_name in expected] + return "Expected one of: \n\t* %s\n" % '\n\t* '.join(expected) + + +class UnexpectedEOF(ParseError, UnexpectedInput): + """An exception that is raised by the parser, when the input ends while it still expects a token. + """ + expected: 'List[Token]' + + def __init__(self, expected, state=None, terminals_by_name=None): + super(UnexpectedEOF, self).__init__() + + self.expected = expected + self.state = state + from .lexer import Token + self.token = Token("", "") # , line=-1, column=-1, pos_in_stream=-1) + self.pos_in_stream = -1 + self.line = -1 + self.column = -1 + self._terminals_by_name = terminals_by_name + + + def __str__(self): + message = "Unexpected end-of-input. " + message += self._format_expected(self.expected) + return message + + +class UnexpectedCharacters(LexError, UnexpectedInput): + """An exception that is raised by the lexer, when it cannot match the next + string of characters to any of its terminals. 
+ """ + + allowed: Set[str] + considered_tokens: Set[Any] + + def __init__(self, seq, lex_pos, line, column, allowed=None, considered_tokens=None, state=None, token_history=None, + terminals_by_name=None, considered_rules=None): + super(UnexpectedCharacters, self).__init__() + + # TODO considered_tokens and allowed can be figured out using state + self.line = line + self.column = column + self.pos_in_stream = lex_pos + self.state = state + self._terminals_by_name = terminals_by_name + + self.allowed = allowed + self.considered_tokens = considered_tokens + self.considered_rules = considered_rules + self.token_history = token_history + + if isinstance(seq, bytes): + self.char = seq[lex_pos:lex_pos + 1].decode("ascii", "backslashreplace") + else: + self.char = seq[lex_pos] + self._context = self.get_context(seq) + + + def __str__(self): + message = "No terminal matches '%s' in the current parser context, at line %d col %d" % (self.char, self.line, self.column) + message += '\n\n' + self._context + if self.allowed: + message += self._format_expected(self.allowed) + if self.token_history: + message += '\nPrevious tokens: %s\n' % ', '.join(repr(t) for t in self.token_history) + return message + + +class UnexpectedToken(ParseError, UnexpectedInput): + """An exception that is raised by the parser, when the token it received + doesn't match any valid step forward. + + Parameters: + token: The mismatched token + expected: The set of expected tokens + considered_rules: Which rules were considered, to deduce the expected tokens + state: A value representing the parser state. Do not rely on its value or type. + interactive_parser: An instance of ``InteractiveParser``, that is initialized to the point of failture, + and can be used for debugging and error handling. + + Note: These parameters are available as attributes of the instance. + """ + + expected: Set[str] + considered_rules: Set[str] + interactive_parser: 'InteractiveParser' + + def __init__(self, token, expected, considered_rules=None, state=None, interactive_parser=None, terminals_by_name=None, token_history=None): + super(UnexpectedToken, self).__init__() + + # TODO considered_rules and expected can be figured out using state + self.line = getattr(token, 'line', '?') + self.column = getattr(token, 'column', '?') + self.pos_in_stream = getattr(token, 'start_pos', None) + self.state = state + + self.token = token + self.expected = expected # XXX deprecate? 
`accepts` is better + self._accepts = NO_VALUE + self.considered_rules = considered_rules + self.interactive_parser = interactive_parser + self._terminals_by_name = terminals_by_name + self.token_history = token_history + + + @property + def accepts(self) -> Set[str]: + if self._accepts is NO_VALUE: + self._accepts = self.interactive_parser and self.interactive_parser.accepts() + return self._accepts + + def __str__(self): + message = ("Unexpected token %r at line %s, column %s.\n%s" + % (self.token, self.line, self.column, self._format_expected(self.accepts or self.expected))) + if self.token_history: + message += "Previous tokens: %r\n" % self.token_history + + return message + + + +class VisitError(LarkError): + """VisitError is raised when visitors are interrupted by an exception + + It provides the following attributes for inspection: + + Parameters: + rule: the name of the visit rule that failed + obj: the tree-node or token that was being processed + orig_exc: the exception that cause it to fail + + Note: These parameters are available as attributes + """ + + obj: 'Union[Tree, Token]' + orig_exc: Exception + + def __init__(self, rule, obj, orig_exc): + message = 'Error trying to process rule "%s":\n\n%s' % (rule, orig_exc) + super(VisitError, self).__init__(message) + + self.rule = rule + self.obj = obj + self.orig_exc = orig_exc + + +class MissingVariableError(LarkError): + pass + +###} diff --git a/src/poetry/core/_vendor/lark/grammar.py b/src/poetry/core/_vendor/lark/grammar.py new file mode 100644 index 0000000..4f4fa90 --- /dev/null +++ b/src/poetry/core/_vendor/lark/grammar.py @@ -0,0 +1,122 @@ +from typing import Optional, Tuple, ClassVar + +from .utils import Serialize + +###{standalone +TOKEN_DEFAULT_PRIORITY = 0 + + +class Symbol(Serialize): + __slots__ = ('name',) + + name: str + is_term: ClassVar[bool] = NotImplemented + + def __init__(self, name: str) -> None: + self.name = name + + def __eq__(self, other): + assert isinstance(other, Symbol), other + return self.is_term == other.is_term and self.name == other.name + + def __ne__(self, other): + return not (self == other) + + def __hash__(self): + return hash(self.name) + + def __repr__(self): + return '%s(%r)' % (type(self).__name__, self.name) + + fullrepr = property(__repr__) + + def renamed(self, f): + return type(self)(f(self.name)) + + +class Terminal(Symbol): + __serialize_fields__ = 'name', 'filter_out' + + is_term: ClassVar[bool] = True + + def __init__(self, name, filter_out=False): + self.name = name + self.filter_out = filter_out + + @property + def fullrepr(self): + return '%s(%r, %r)' % (type(self).__name__, self.name, self.filter_out) + + def renamed(self, f): + return type(self)(f(self.name), self.filter_out) + + +class NonTerminal(Symbol): + __serialize_fields__ = 'name', + + is_term: ClassVar[bool] = False + + +class RuleOptions(Serialize): + __serialize_fields__ = 'keep_all_tokens', 'expand1', 'priority', 'template_source', 'empty_indices' + + keep_all_tokens: bool + expand1: bool + priority: Optional[int] + template_source: Optional[str] + empty_indices: Tuple[bool, ...] 
+ + def __init__(self, keep_all_tokens: bool=False, expand1: bool=False, priority: Optional[int]=None, template_source: Optional[str]=None, empty_indices: Tuple[bool, ...]=()) -> None: + self.keep_all_tokens = keep_all_tokens + self.expand1 = expand1 + self.priority = priority + self.template_source = template_source + self.empty_indices = empty_indices + + def __repr__(self): + return 'RuleOptions(%r, %r, %r, %r)' % ( + self.keep_all_tokens, + self.expand1, + self.priority, + self.template_source + ) + + +class Rule(Serialize): + """ + origin : a symbol + expansion : a list of symbols + order : index of this expansion amongst all rules of the same name + """ + __slots__ = ('origin', 'expansion', 'alias', 'options', 'order', '_hash') + + __serialize_fields__ = 'origin', 'expansion', 'order', 'alias', 'options' + __serialize_namespace__ = Terminal, NonTerminal, RuleOptions + + def __init__(self, origin, expansion, order=0, alias=None, options=None): + self.origin = origin + self.expansion = expansion + self.alias = alias + self.order = order + self.options = options or RuleOptions() + self._hash = hash((self.origin, tuple(self.expansion))) + + def _deserialize(self): + self._hash = hash((self.origin, tuple(self.expansion))) + + def __str__(self): + return '<%s : %s>' % (self.origin.name, ' '.join(x.name for x in self.expansion)) + + def __repr__(self): + return 'Rule(%r, %r, %r, %r)' % (self.origin, self.expansion, self.alias, self.options) + + def __hash__(self): + return self._hash + + def __eq__(self, other): + if not isinstance(other, Rule): + return False + return self.origin == other.origin and self.expansion == other.expansion + + +###} diff --git a/src/poetry/core/_vendor/lark/grammars/__init__.py b/src/poetry/core/_vendor/lark/grammars/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/_vendor/lark/grammars/common.lark b/src/poetry/core/_vendor/lark/grammars/common.lark new file mode 100644 index 0000000..e15b163 --- /dev/null +++ b/src/poetry/core/_vendor/lark/grammars/common.lark @@ -0,0 +1,59 @@ +// Basic terminals for common use + + +// +// Numbers +// + +DIGIT: "0".."9" +HEXDIGIT: "a".."f"|"A".."F"|DIGIT + +INT: DIGIT+ +SIGNED_INT: ["+"|"-"] INT +DECIMAL: INT "." INT? | "." INT + +// float = /-?\d+(\.\d+)?([eE][+-]?\d+)?/ +_EXP: ("e"|"E") SIGNED_INT +FLOAT: INT _EXP | DECIMAL _EXP? +SIGNED_FLOAT: ["+"|"-"] FLOAT + +NUMBER: FLOAT | INT +SIGNED_NUMBER: ["+"|"-"] NUMBER + +// +// Strings +// +_STRING_INNER: /.*?/ +_STRING_ESC_INNER: _STRING_INNER /(? ignore + | "%import" import_path ["->" name] -> import + | "%import" import_path name_list -> multi_import + | "%override" rule -> override_rule + | "%declare" name+ -> declare + +!import_path: "."? name ("." name)* +name_list: "(" name ("," name)* ")" + +?expansions: alias (_VBAR alias)* + +?alias: expansion ["->" RULE] + +?expansion: expr* + +?expr: atom [OP | "~" NUMBER [".." NUMBER]] + +?atom: "(" expansions ")" + | "[" expansions "]" -> maybe + | value + +?value: STRING ".." STRING -> literal_range + | name + | (REGEXP | STRING) -> literal + | name "{" value ("," value)* "}" -> template_usage + +name: RULE + | TOKEN + +_VBAR: _NL? "|" +OP: /[+*]|[?](?![a-z])/ +RULE: /!?[_?]?[a-z][_a-z0-9]*/ +TOKEN: /_?[A-Z][_A-Z0-9]*/ +STRING: _STRING "i"? 
+REGEXP: /\/(?!\/)(\\\/|\\\\|[^\/])*?\/[imslux]*/ +_NL: /(\r?\n)+\s*/ + +%import common.ESCAPED_STRING -> _STRING +%import common.SIGNED_INT -> NUMBER +%import common.WS_INLINE + +COMMENT: /\s*/ "//" /[^\n]/* + +%ignore WS_INLINE +%ignore COMMENT diff --git a/src/poetry/core/_vendor/lark/grammars/python.lark b/src/poetry/core/_vendor/lark/grammars/python.lark new file mode 100644 index 0000000..5c131a2 --- /dev/null +++ b/src/poetry/core/_vendor/lark/grammars/python.lark @@ -0,0 +1,304 @@ +// Python 3 grammar for Lark + +// This grammar should parse all python 3.x code successfully. + +// Adapted from: https://docs.python.org/3/reference/grammar.html + +// Start symbols for the grammar: +// single_input is a single interactive statement; +// file_input is a module or sequence of commands read from an input file; +// eval_input is the input for the eval() functions. +// NB: compound_stmt in single_input is followed by extra NEWLINE! +// + +single_input: _NEWLINE | simple_stmt | compound_stmt _NEWLINE +file_input: (_NEWLINE | stmt)* +eval_input: testlist _NEWLINE* + +decorator: "@" dotted_name [ "(" [arguments] ")" ] _NEWLINE +decorators: decorator+ +decorated: decorators (classdef | funcdef | async_funcdef) + +async_funcdef: "async" funcdef +funcdef: "def" name "(" [parameters] ")" ["->" test] ":" suite + +parameters: paramvalue ("," paramvalue)* ["," SLASH ("," paramvalue)*] ["," [starparams | kwparams]] + | starparams + | kwparams + +SLASH: "/" // Otherwise the it will completely disappear and it will be undisguisable in the result +starparams: (starparam | starguard) poststarparams +starparam: "*" typedparam +starguard: "*" +poststarparams: ("," paramvalue)* ["," kwparams] +kwparams: "**" typedparam ","? + +?paramvalue: typedparam ("=" test)? +?typedparam: name (":" test)? + + +lambdef: "lambda" [lambda_params] ":" test +lambdef_nocond: "lambda" [lambda_params] ":" test_nocond +lambda_params: lambda_paramvalue ("," lambda_paramvalue)* ["," [lambda_starparams | lambda_kwparams]] + | lambda_starparams + | lambda_kwparams +?lambda_paramvalue: name ("=" test)? +lambda_starparams: "*" [name] ("," lambda_paramvalue)* ["," [lambda_kwparams]] +lambda_kwparams: "**" name ","? + + +?stmt: simple_stmt | compound_stmt +?simple_stmt: small_stmt (";" small_stmt)* [";"] _NEWLINE +?small_stmt: (expr_stmt | assign_stmt | del_stmt | pass_stmt | flow_stmt | import_stmt | global_stmt | nonlocal_stmt | assert_stmt) +expr_stmt: testlist_star_expr +assign_stmt: annassign | augassign | assign + +annassign: testlist_star_expr ":" test ["=" test] +assign: testlist_star_expr ("=" (yield_expr|testlist_star_expr))+ +augassign: testlist_star_expr augassign_op (yield_expr|testlist) +!augassign_op: "+=" | "-=" | "*=" | "@=" | "/=" | "%=" | "&=" | "|=" | "^=" | "<<=" | ">>=" | "**=" | "//=" +?testlist_star_expr: test_or_star_expr + | test_or_star_expr ("," test_or_star_expr)+ ","? -> tuple + | test_or_star_expr "," -> tuple + +// For normal and annotated assignments, additional restrictions enforced by the interpreter +del_stmt: "del" exprlist +pass_stmt: "pass" +?flow_stmt: break_stmt | continue_stmt | return_stmt | raise_stmt | yield_stmt +break_stmt: "break" +continue_stmt: "continue" +return_stmt: "return" [testlist] +yield_stmt: yield_expr +raise_stmt: "raise" [test ["from" test]] +import_stmt: import_name | import_from +import_name: "import" dotted_as_names +// note below: the ("." | "...") is necessary because "..." is tokenized as ELLIPSIS +import_from: "from" (dots? 
dotted_name | dots) "import" ("*" | "(" import_as_names ")" | import_as_names) +!dots: "."+ +import_as_name: name ["as" name] +dotted_as_name: dotted_name ["as" name] +import_as_names: import_as_name ("," import_as_name)* [","] +dotted_as_names: dotted_as_name ("," dotted_as_name)* +dotted_name: name ("." name)* +global_stmt: "global" name ("," name)* +nonlocal_stmt: "nonlocal" name ("," name)* +assert_stmt: "assert" test ["," test] + +?compound_stmt: if_stmt | while_stmt | for_stmt | try_stmt | match_stmt + | with_stmt | funcdef | classdef | decorated | async_stmt +async_stmt: "async" (funcdef | with_stmt | for_stmt) +if_stmt: "if" test ":" suite elifs ["else" ":" suite] +elifs: elif_* +elif_: "elif" test ":" suite +while_stmt: "while" test ":" suite ["else" ":" suite] +for_stmt: "for" exprlist "in" testlist ":" suite ["else" ":" suite] +try_stmt: "try" ":" suite except_clauses ["else" ":" suite] [finally] + | "try" ":" suite finally -> try_finally +finally: "finally" ":" suite +except_clauses: except_clause+ +except_clause: "except" [test ["as" name]] ":" suite +// NB compile.c makes sure that the default except clause is last + + +with_stmt: "with" with_items ":" suite +with_items: with_item ("," with_item)* +with_item: test ["as" name] + +match_stmt: "match" test ":" _NEWLINE _INDENT case+ _DEDENT + +case: "case" pattern ["if" test] ":" suite + +?pattern: sequence_item_pattern "," _sequence_pattern -> sequence_pattern + | as_pattern +?as_pattern: or_pattern ("as" NAME)? +?or_pattern: closed_pattern ("|" closed_pattern)* +?closed_pattern: literal_pattern + | NAME -> capture_pattern + | "_" -> any_pattern + | attr_pattern + | "(" as_pattern ")" + | "[" _sequence_pattern "]" -> sequence_pattern + | "(" (sequence_item_pattern "," _sequence_pattern)? ")" -> sequence_pattern + | "{" (mapping_item_pattern ("," mapping_item_pattern)* ","?)?"}" -> mapping_pattern + | "{" (mapping_item_pattern ("," mapping_item_pattern)* ",")? "**" NAME ","? "}" -> mapping_star_pattern + | class_pattern + +literal_pattern: inner_literal_pattern + +?inner_literal_pattern: "None" -> const_none + | "True" -> const_true + | "False" -> const_false + | STRING -> string + | number + +attr_pattern: NAME ("." NAME)+ -> value + +name_or_attr_pattern: NAME ("." NAME)* -> value + +mapping_item_pattern: (literal_pattern|attr_pattern) ":" as_pattern + +_sequence_pattern: (sequence_item_pattern ("," sequence_item_pattern)* ","?)? +?sequence_item_pattern: as_pattern + | "*" NAME -> star_pattern + +class_pattern: name_or_attr_pattern "(" [arguments_pattern ","?] ")" +arguments_pattern: pos_arg_pattern ["," keyws_arg_pattern] + | keyws_arg_pattern -> no_pos_arguments + +pos_arg_pattern: as_pattern ("," as_pattern)* +keyws_arg_pattern: keyw_arg_pattern ("," keyw_arg_pattern)* +keyw_arg_pattern: NAME "=" as_pattern + + + +suite: simple_stmt | _NEWLINE _INDENT stmt+ _DEDENT + +?test: or_test ("if" or_test "else" test)? 
+ | lambdef + | assign_expr + +assign_expr: name ":=" test + +?test_nocond: or_test | lambdef_nocond + +?or_test: and_test ("or" and_test)* +?and_test: not_test_ ("and" not_test_)* +?not_test_: "not" not_test_ -> not_test + | comparison +?comparison: expr (comp_op expr)* +star_expr: "*" expr + +?expr: or_expr +?or_expr: xor_expr ("|" xor_expr)* +?xor_expr: and_expr ("^" and_expr)* +?and_expr: shift_expr ("&" shift_expr)* +?shift_expr: arith_expr (_shift_op arith_expr)* +?arith_expr: term (_add_op term)* +?term: factor (_mul_op factor)* +?factor: _unary_op factor | power + +!_unary_op: "+"|"-"|"~" +!_add_op: "+"|"-" +!_shift_op: "<<"|">>" +!_mul_op: "*"|"@"|"/"|"%"|"//" +// <> isn't actually a valid comparison operator in Python. It's here for the +// sake of a __future__ import described in PEP 401 (which really works :-) +!comp_op: "<"|">"|"=="|">="|"<="|"<>"|"!="|"in"|"not" "in"|"is"|"is" "not" + +?power: await_expr ("**" factor)? +?await_expr: AWAIT? atom_expr +AWAIT: "await" + +?atom_expr: atom_expr "(" [arguments] ")" -> funccall + | atom_expr "[" subscriptlist "]" -> getitem + | atom_expr "." name -> getattr + | atom + +?atom: "(" yield_expr ")" + | "(" _tuple_inner? ")" -> tuple + | "(" comprehension{test_or_star_expr} ")" -> tuple_comprehension + | "[" _testlist_comp? "]" -> list + | "[" comprehension{test_or_star_expr} "]" -> list_comprehension + | "{" _dict_exprlist? "}" -> dict + | "{" comprehension{key_value} "}" -> dict_comprehension + | "{" _set_exprlist "}" -> set + | "{" comprehension{test} "}" -> set_comprehension + | name -> var + | number + | string_concat + | "(" test ")" + | "..." -> ellipsis + | "None" -> const_none + | "True" -> const_true + | "False" -> const_false + + +?string_concat: string+ + +_testlist_comp: test | _tuple_inner +_tuple_inner: test_or_star_expr (("," test_or_star_expr)+ [","] | ",") + + +?test_or_star_expr: test + | star_expr + +?subscriptlist: subscript + | subscript (("," subscript)+ [","] | ",") -> subscript_tuple +?subscript: test | ([test] ":" [test] [sliceop]) -> slice +sliceop: ":" [test] +?exprlist: (expr|star_expr) + | (expr|star_expr) (("," (expr|star_expr))+ [","]|",") +?testlist: test | testlist_tuple +testlist_tuple: test (("," test)+ [","] | ",") +_dict_exprlist: (key_value | "**" expr) ("," (key_value | "**" expr))* [","] + +key_value: test ":" test + +_set_exprlist: test_or_star_expr ("," test_or_star_expr)* [","] + +classdef: "class" name ["(" [arguments] ")"] ":" suite + + + +arguments: argvalue ("," argvalue)* ("," [ starargs | kwargs])? + | starargs + | kwargs + | comprehension{test} + +starargs: stararg ("," stararg)* ("," argvalue)* ["," kwargs] +stararg: "*" test +kwargs: "**" test ("," argvalue)* + +?argvalue: test ("=" test)? + + +comprehension{comp_result}: comp_result comp_fors [comp_if] +comp_fors: comp_for+ +comp_for: [ASYNC] "for" exprlist "in" or_test +ASYNC: "async" +?comp_if: "if" test_nocond + +// not used in grammar, but may appear in "node" passed from Parser to Compiler +encoding_decl: name + +yield_expr: "yield" [testlist] + | "yield" "from" test -> yield_from + +number: DEC_NUMBER | HEX_NUMBER | BIN_NUMBER | OCT_NUMBER | FLOAT_NUMBER | IMAG_NUMBER +string: STRING | LONG_STRING + +// Other terminals + +_NEWLINE: ( /\r?\n[\t ]*/ | COMMENT )+ + +%ignore /[\t \f]+/ // WS +%ignore /\\[\t \f]*\r?\n/ // LINE_CONT +%ignore COMMENT +%declare _INDENT _DEDENT + + +// Python terminals + +!name: NAME | "match" | "case" +NAME: /[^\W\d]\w*/ +COMMENT: /#[^\n]*/ + +STRING: /([ubf]?r?|r[ubf])("(?!"").*?(? 
None: + self.paren_level = 0 + self.indent_level = [0] + assert self.tab_len > 0 + + def handle_NL(self, token: Token) -> Iterator[Token]: + if self.paren_level > 0: + return + + yield token + + indent_str = token.rsplit('\n', 1)[1] # Tabs and spaces + indent = indent_str.count(' ') + indent_str.count('\t') * self.tab_len + + if indent > self.indent_level[-1]: + self.indent_level.append(indent) + yield Token.new_borrow_pos(self.INDENT_type, indent_str, token) + else: + while indent < self.indent_level[-1]: + self.indent_level.pop() + yield Token.new_borrow_pos(self.DEDENT_type, indent_str, token) + + if indent != self.indent_level[-1]: + raise DedentError('Unexpected dedent to column %s. Expected dedent to %s' % (indent, self.indent_level[-1])) + + def _process(self, stream): + for token in stream: + if token.type == self.NL_type: + yield from self.handle_NL(token) + else: + yield token + + if token.type in self.OPEN_PAREN_types: + self.paren_level += 1 + elif token.type in self.CLOSE_PAREN_types: + self.paren_level -= 1 + assert self.paren_level >= 0 + + while len(self.indent_level) > 1: + self.indent_level.pop() + yield Token(self.DEDENT_type, '') + + assert self.indent_level == [0], self.indent_level + + def process(self, stream): + self.paren_level = 0 + self.indent_level = [0] + return self._process(stream) + + # XXX Hack for ContextualLexer. Maybe there's a more elegant solution? + @property + def always_accept(self): + return (self.NL_type,) + + @property + @abstractmethod + def NL_type(self) -> str: + raise NotImplementedError() + + @property + @abstractmethod + def OPEN_PAREN_types(self) -> List[str]: + raise NotImplementedError() + + @property + @abstractmethod + def CLOSE_PAREN_types(self) -> List[str]: + raise NotImplementedError() + + @property + @abstractmethod + def INDENT_type(self) -> str: + raise NotImplementedError() + + @property + @abstractmethod + def DEDENT_type(self) -> str: + raise NotImplementedError() + + @property + @abstractmethod + def tab_len(self) -> int: + raise NotImplementedError() + + +class PythonIndenter(Indenter): + NL_type = '_NEWLINE' + OPEN_PAREN_types = ['LPAR', 'LSQB', 'LBRACE'] + CLOSE_PAREN_types = ['RPAR', 'RSQB', 'RBRACE'] + INDENT_type = '_INDENT' + DEDENT_type = '_DEDENT' + tab_len = 8 + +###} diff --git a/src/poetry/core/_vendor/lark/lark.py b/src/poetry/core/_vendor/lark/lark.py new file mode 100644 index 0000000..c93e9e1 --- /dev/null +++ b/src/poetry/core/_vendor/lark/lark.py @@ -0,0 +1,648 @@ +from abc import ABC, abstractmethod +import getpass +import sys, os, pickle +import tempfile +import types +import re +from typing import ( + TypeVar, Type, List, Dict, Iterator, Callable, Union, Optional, Sequence, + Tuple, Iterable, IO, Any, TYPE_CHECKING, Collection +) +if TYPE_CHECKING: + from .parsers.lalr_interactive_parser import InteractiveParser + from .tree import ParseTree + from .visitors import Transformer + if sys.version_info >= (3, 8): + from typing import Literal + else: + from typing_extensions import Literal + from .parser_frontends import ParsingFrontend + +from .exceptions import ConfigurationError, assert_config, UnexpectedInput +from .utils import Serialize, SerializeMemoizer, FS, isascii, logger +from .load_grammar import load_grammar, FromPackageLoader, Grammar, verify_used_files, PackageResource, md5_digest +from .tree import Tree +from .common import LexerConf, ParserConf, _ParserArgType, _LexerArgType + +from .lexer import Lexer, BasicLexer, TerminalDef, LexerThread, Token +from .parse_tree_builder import 
ParseTreeBuilder +from .parser_frontends import _validate_frontend_args, _get_lexer_callbacks, _deserialize_parsing_frontend, _construct_parsing_frontend +from .grammar import Rule + + +try: + import regex + _has_regex = True +except ImportError: + _has_regex = False + + +###{standalone + + +class PostLex(ABC): + @abstractmethod + def process(self, stream: Iterator[Token]) -> Iterator[Token]: + return stream + + always_accept: Iterable[str] = () + +class LarkOptions(Serialize): + """Specifies the options for Lark + + """ + + start: List[str] + debug: bool + transformer: 'Optional[Transformer]' + propagate_positions: Union[bool, str] + maybe_placeholders: bool + cache: Union[bool, str] + regex: bool + g_regex_flags: int + keep_all_tokens: bool + tree_class: Any + parser: _ParserArgType + lexer: _LexerArgType + ambiguity: 'Literal["auto", "resolve", "explicit", "forest"]' + postlex: Optional[PostLex] + priority: 'Optional[Literal["auto", "normal", "invert"]]' + lexer_callbacks: Dict[str, Callable[[Token], Token]] + use_bytes: bool + edit_terminals: Optional[Callable[[TerminalDef], TerminalDef]] + import_paths: 'List[Union[str, Callable[[Union[None, str, PackageResource], str], Tuple[str, str]]]]' + source_path: Optional[str] + + OPTIONS_DOC = """ + **=== General Options ===** + + start + The start symbol. Either a string, or a list of strings for multiple possible starts (Default: "start") + debug + Display debug information and extra warnings. Use only when debugging (Default: ``False``) + When used with Earley, it generates a forest graph as "sppf.png", if 'dot' is installed. + transformer + Applies the transformer to every parse tree (equivalent to applying it after the parse, but faster) + propagate_positions + Propagates (line, column, end_line, end_column) attributes into all tree branches. + Accepts ``False``, ``True``, or a callable, which will filter which nodes to ignore when propagating. + maybe_placeholders + When ``True``, the ``[]`` operator returns ``None`` when not matched. + When ``False``, ``[]`` behaves like the ``?`` operator, and returns no value at all. + (default= ``True``) + cache + Cache the results of the Lark grammar analysis, for x2 to x3 faster loading. LALR only for now. + + - When ``False``, does nothing (default) + - When ``True``, caches to a temporary file in the local directory + - When given a string, caches to the path pointed by the string + regex + When True, uses the ``regex`` module instead of the stdlib ``re``. + g_regex_flags + Flags that are applied to all terminals (both regex and strings) + keep_all_tokens + Prevent the tree builder from automagically removing "punctuation" tokens (Default: ``False``) + tree_class + Lark will produce trees comprised of instances of this class instead of the default ``lark.Tree``. + + **=== Algorithm Options ===** + + parser + Decides which parser engine to use. Accepts "earley" or "lalr". (Default: "earley"). + (there is also a "cyk" option for legacy) + lexer + Decides whether or not to use a lexer stage + + - "auto" (default): Choose for me based on the parser + - "basic": Use a basic lexer + - "contextual": Stronger lexer (only works with parser="lalr") + - "dynamic": Flexible and powerful (only with parser="earley") + - "dynamic_complete": Same as dynamic, but tries *every* variation of tokenizing possible. + ambiguity + Decides how to handle ambiguity in the parse. 
Only relevant if parser="earley" + + - "resolve": The parser will automatically choose the simplest derivation + (it chooses consistently: greedy for tokens, non-greedy for rules) + - "explicit": The parser will return all derivations wrapped in "_ambig" tree nodes (i.e. a forest). + - "forest": The parser will return the root of the shared packed parse forest. + + **=== Misc. / Domain Specific Options ===** + + postlex + Lexer post-processing (Default: ``None``) Only works with the basic and contextual lexers. + priority + How priorities should be evaluated - "auto", ``None``, "normal", "invert" (Default: "auto") + lexer_callbacks + Dictionary of callbacks for the lexer. May alter tokens during lexing. Use with caution. + use_bytes + Accept an input of type ``bytes`` instead of ``str``. + edit_terminals + A callback for editing the terminals before parse. + import_paths + A List of either paths or loader functions to specify from where grammars are imported + source_path + Override the source of from where the grammar was loaded. Useful for relative imports and unconventional grammar loading + **=== End of Options ===** + """ + if __doc__: + __doc__ += OPTIONS_DOC + + + # Adding a new option needs to be done in multiple places: + # - In the dictionary below. This is the primary truth of which options `Lark.__init__` accepts + # - In the docstring above. It is used both for the docstring of `LarkOptions` and `Lark`, and in readthedocs + # - As an attribute of `LarkOptions` above + # - Potentially in `_LOAD_ALLOWED_OPTIONS` below this class, when the option doesn't change how the grammar is loaded + # - Potentially in `lark.tools.__init__`, if it makes sense, and it can easily be passed as a cmd argument + _defaults: Dict[str, Any] = { + 'debug': False, + 'keep_all_tokens': False, + 'tree_class': None, + 'cache': False, + 'postlex': None, + 'parser': 'earley', + 'lexer': 'auto', + 'transformer': None, + 'start': 'start', + 'priority': 'auto', + 'ambiguity': 'auto', + 'regex': False, + 'propagate_positions': False, + 'lexer_callbacks': {}, + 'maybe_placeholders': True, + 'edit_terminals': None, + 'g_regex_flags': 0, + 'use_bytes': False, + 'import_paths': [], + 'source_path': None, + '_plugins': {}, + } + + def __init__(self, options_dict: Dict[str, Any]) -> None: + o = dict(options_dict) + + options = {} + for name, default in self._defaults.items(): + if name in o: + value = o.pop(name) + if isinstance(default, bool) and name not in ('cache', 'use_bytes', 'propagate_positions'): + value = bool(value) + else: + value = default + + options[name] = value + + if isinstance(options['start'], str): + options['start'] = [options['start']] + + self.__dict__['options'] = options + + + assert_config(self.parser, ('earley', 'lalr', 'cyk', None)) + + if self.parser == 'earley' and self.transformer: + raise ConfigurationError('Cannot specify an embedded transformer when using the Earley algorithm. ' + 'Please use your transformer on the resulting parse tree, or use a different algorithm (i.e. LALR)') + + if o: + raise ConfigurationError("Unknown options: %s" % o.keys()) + + def __getattr__(self, name: str) -> Any: + try: + return self.__dict__['options'][name] + except KeyError as e: + raise AttributeError(e) + + def __setattr__(self, name: str, value: str) -> None: + assert_config(name, self.options.keys(), "%r isn't a valid option. 
Expected one of: %s") + self.options[name] = value + + def serialize(self, memo = None) -> Dict[str, Any]: + return self.options + + @classmethod + def deserialize(cls, data: Dict[str, Any], memo: Dict[int, Union[TerminalDef, Rule]]) -> "LarkOptions": + return cls(data) + + +# Options that can be passed to the Lark parser, even when it was loaded from cache/standalone. +# These options are only used outside of `load_grammar`. +_LOAD_ALLOWED_OPTIONS = {'postlex', 'transformer', 'lexer_callbacks', 'use_bytes', 'debug', 'g_regex_flags', 'regex', 'propagate_positions', 'tree_class', '_plugins'} + +_VALID_PRIORITY_OPTIONS = ('auto', 'normal', 'invert', None) +_VALID_AMBIGUITY_OPTIONS = ('auto', 'resolve', 'explicit', 'forest') + + +_T = TypeVar('_T', bound="Lark") + +class Lark(Serialize): + """Main interface for the library. + + It's mostly a thin wrapper for the many different parsers, and for the tree constructor. + + Parameters: + grammar: a string or file-object containing the grammar spec (using Lark's ebnf syntax) + options: a dictionary controlling various aspects of Lark. + + Example: + >>> Lark(r'''start: "foo" ''') + Lark(...) + """ + + source_path: str + source_grammar: str + grammar: 'Grammar' + options: LarkOptions + lexer: Lexer + terminals: Collection[TerminalDef] + + def __init__(self, grammar: 'Union[Grammar, str, IO[str]]', **options) -> None: + self.options = LarkOptions(options) + re_module: types.ModuleType + + # Set regex or re module + use_regex = self.options.regex + if use_regex: + if _has_regex: + re_module = regex + else: + raise ImportError('`regex` module must be installed if calling `Lark(regex=True)`.') + else: + re_module = re + + # Some, but not all file-like objects have a 'name' attribute + if self.options.source_path is None: + try: + self.source_path = grammar.name # type: ignore[union-attr] + except AttributeError: + self.source_path = '' + else: + self.source_path = self.options.source_path + + # Drain file-like objects to get their contents + try: + read = grammar.read # type: ignore[union-attr] + except AttributeError: + pass + else: + grammar = read() + + cache_fn = None + cache_md5 = None + if isinstance(grammar, str): + self.source_grammar = grammar + if self.options.use_bytes: + if not isascii(grammar): + raise ConfigurationError("Grammar must be ascii only, when use_bytes=True") + + if self.options.cache: + if self.options.parser != 'lalr': + raise ConfigurationError("cache only works with parser='lalr' for now") + + unhashable = ('transformer', 'postlex', 'lexer_callbacks', 'edit_terminals', '_plugins') + options_str = ''.join(k+str(v) for k, v in options.items() if k not in unhashable) + from . import __version__ + s = grammar + options_str + __version__ + str(sys.version_info[:2]) + cache_md5 = md5_digest(s) + + if isinstance(self.options.cache, str): + cache_fn = self.options.cache + else: + if self.options.cache is not True: + raise ConfigurationError("cache argument must be bool or str") + + try: + username = getpass.getuser() + except Exception: + # The exception raised may be ImportError or OSError in + # the future. For the cache, we don't care about the + # specific reason - we just want a username. 
+ username = "unknown" + + cache_fn = tempfile.gettempdir() + "/.lark_cache_%s_%s_%s_%s.tmp" % (username, cache_md5, *sys.version_info[:2]) + + old_options = self.options + try: + with FS.open(cache_fn, 'rb') as f: + logger.debug('Loading grammar from cache: %s', cache_fn) + # Remove options that aren't relevant for loading from cache + for name in (set(options) - _LOAD_ALLOWED_OPTIONS): + del options[name] + file_md5 = f.readline().rstrip(b'\n') + cached_used_files = pickle.load(f) + if file_md5 == cache_md5.encode('utf8') and verify_used_files(cached_used_files): + cached_parser_data = pickle.load(f) + self._load(cached_parser_data, **options) + return + except FileNotFoundError: + # The cache file doesn't exist; parse and compose the grammar as normal + pass + except Exception: # We should probably narrow done which errors we catch here. + logger.exception("Failed to load Lark from cache: %r. We will try to carry on.", cache_fn) + + # In theory, the Lark instance might have been messed up by the call to `_load`. + # In practice the only relevant thing that might have been overwritten should be `options` + self.options = old_options + + + # Parse the grammar file and compose the grammars + self.grammar, used_files = load_grammar(grammar, self.source_path, self.options.import_paths, self.options.keep_all_tokens) + else: + assert isinstance(grammar, Grammar) + self.grammar = grammar + + + if self.options.lexer == 'auto': + if self.options.parser == 'lalr': + self.options.lexer = 'contextual' + elif self.options.parser == 'earley': + if self.options.postlex is not None: + logger.info("postlex can't be used with the dynamic lexer, so we use 'basic' instead. " + "Consider using lalr with contextual instead of earley") + self.options.lexer = 'basic' + else: + self.options.lexer = 'dynamic' + elif self.options.parser == 'cyk': + self.options.lexer = 'basic' + else: + assert False, self.options.parser + lexer = self.options.lexer + if isinstance(lexer, type): + assert issubclass(lexer, Lexer) # XXX Is this really important? Maybe just ensure interface compliance + else: + assert_config(lexer, ('basic', 'contextual', 'dynamic', 'dynamic_complete')) + if self.options.postlex is not None and 'dynamic' in lexer: + raise ConfigurationError("Can't use postlex with a dynamic lexer. Use basic or contextual instead") + + if self.options.ambiguity == 'auto': + if self.options.parser == 'earley': + self.options.ambiguity = 'resolve' + else: + assert_config(self.options.parser, ('earley', 'cyk'), "%r doesn't support disambiguation. Use one of these parsers instead: %s") + + if self.options.priority == 'auto': + self.options.priority = 'normal' + + if self.options.priority not in _VALID_PRIORITY_OPTIONS: + raise ConfigurationError("invalid priority option: %r. Must be one of %r" % (self.options.priority, _VALID_PRIORITY_OPTIONS)) + if self.options.ambiguity not in _VALID_AMBIGUITY_OPTIONS: + raise ConfigurationError("invalid ambiguity option: %r. 
Must be one of %r" % (self.options.ambiguity, _VALID_AMBIGUITY_OPTIONS)) + + if self.options.parser is None: + terminals_to_keep = '*' + elif self.options.postlex is not None: + terminals_to_keep = set(self.options.postlex.always_accept) + else: + terminals_to_keep = set() + + # Compile the EBNF grammar into BNF + self.terminals, self.rules, self.ignore_tokens = self.grammar.compile(self.options.start, terminals_to_keep) + + if self.options.edit_terminals: + for t in self.terminals: + self.options.edit_terminals(t) + + self._terminals_dict = {t.name: t for t in self.terminals} + + # If the user asked to invert the priorities, negate them all here. + if self.options.priority == 'invert': + for rule in self.rules: + if rule.options.priority is not None: + rule.options.priority = -rule.options.priority + for term in self.terminals: + term.priority = -term.priority + # Else, if the user asked to disable priorities, strip them from the + # rules and terminals. This allows the Earley parsers to skip an extra forest walk + # for improved performance, if you don't need them (or didn't specify any). + elif self.options.priority is None: + for rule in self.rules: + if rule.options.priority is not None: + rule.options.priority = None + for term in self.terminals: + term.priority = 0 + + # TODO Deprecate lexer_callbacks? + self.lexer_conf = LexerConf( + self.terminals, re_module, self.ignore_tokens, self.options.postlex, + self.options.lexer_callbacks, self.options.g_regex_flags, use_bytes=self.options.use_bytes + ) + + if self.options.parser: + self.parser = self._build_parser() + elif lexer: + self.lexer = self._build_lexer() + + if cache_fn: + logger.debug('Saving grammar to cache: %s', cache_fn) + try: + with FS.open(cache_fn, 'wb') as f: + assert cache_md5 is not None + f.write(cache_md5.encode('utf8') + b'\n') + pickle.dump(used_files, f) + self.save(f, _LOAD_ALLOWED_OPTIONS) + except IOError as e: + logger.exception("Failed to save Lark to cache: %r.", cache_fn, e) + + if __doc__: + __doc__ += "\n\n" + LarkOptions.OPTIONS_DOC + + __serialize_fields__ = 'parser', 'rules', 'options' + + def _build_lexer(self, dont_ignore: bool=False) -> BasicLexer: + lexer_conf = self.lexer_conf + if dont_ignore: + from copy import copy + lexer_conf = copy(lexer_conf) + lexer_conf.ignore = () + return BasicLexer(lexer_conf) + + def _prepare_callbacks(self) -> None: + self._callbacks = {} + # we don't need these callbacks if we aren't building a tree + if self.options.ambiguity != 'forest': + self._parse_tree_builder = ParseTreeBuilder( + self.rules, + self.options.tree_class or Tree, + self.options.propagate_positions, + self.options.parser != 'lalr' and self.options.ambiguity == 'explicit', + self.options.maybe_placeholders + ) + self._callbacks = self._parse_tree_builder.create_callback(self.options.transformer) + self._callbacks.update(_get_lexer_callbacks(self.options.transformer, self.terminals)) + + def _build_parser(self) -> "ParsingFrontend": + self._prepare_callbacks() + _validate_frontend_args(self.options.parser, self.options.lexer) + parser_conf = ParserConf(self.rules, self._callbacks, self.options.start) + return _construct_parsing_frontend( + self.options.parser, + self.options.lexer, + self.lexer_conf, + parser_conf, + options=self.options + ) + + def save(self, f, exclude_options: Collection[str] = ()) -> None: + """Saves the instance into the given file object + + Useful for caching and multiprocessing. 
+ """ + data, m = self.memo_serialize([TerminalDef, Rule]) + if exclude_options: + data["options"] = {n: v for n, v in data["options"].items() if n not in exclude_options} + pickle.dump({'data': data, 'memo': m}, f, protocol=pickle.HIGHEST_PROTOCOL) + + @classmethod + def load(cls: Type[_T], f) -> _T: + """Loads an instance from the given file object + + Useful for caching and multiprocessing. + """ + inst = cls.__new__(cls) + return inst._load(f) + + def _deserialize_lexer_conf(self, data: Dict[str, Any], memo: Dict[int, Union[TerminalDef, Rule]], options: LarkOptions) -> LexerConf: + lexer_conf = LexerConf.deserialize(data['lexer_conf'], memo) + lexer_conf.callbacks = options.lexer_callbacks or {} + lexer_conf.re_module = regex if options.regex else re + lexer_conf.use_bytes = options.use_bytes + lexer_conf.g_regex_flags = options.g_regex_flags + lexer_conf.skip_validation = True + lexer_conf.postlex = options.postlex + return lexer_conf + + def _load(self: _T, f: Any, **kwargs) -> _T: + if isinstance(f, dict): + d = f + else: + d = pickle.load(f) + memo_json = d['memo'] + data = d['data'] + + assert memo_json + memo = SerializeMemoizer.deserialize(memo_json, {'Rule': Rule, 'TerminalDef': TerminalDef}, {}) + options = dict(data['options']) + if (set(kwargs) - _LOAD_ALLOWED_OPTIONS) & set(LarkOptions._defaults): + raise ConfigurationError("Some options are not allowed when loading a Parser: {}" + .format(set(kwargs) - _LOAD_ALLOWED_OPTIONS)) + options.update(kwargs) + self.options = LarkOptions.deserialize(options, memo) + self.rules = [Rule.deserialize(r, memo) for r in data['rules']] + self.source_path = '' + _validate_frontend_args(self.options.parser, self.options.lexer) + self.lexer_conf = self._deserialize_lexer_conf(data['parser'], memo, self.options) + self.terminals = self.lexer_conf.terminals + self._prepare_callbacks() + self._terminals_dict = {t.name: t for t in self.terminals} + self.parser = _deserialize_parsing_frontend( + data['parser'], + memo, + self.lexer_conf, + self._callbacks, + self.options, # Not all, but multiple attributes are used + ) + return self + + @classmethod + def _load_from_dict(cls, data, memo, **kwargs): + inst = cls.__new__(cls) + return inst._load({'data': data, 'memo': memo}, **kwargs) + + @classmethod + def open(cls: Type[_T], grammar_filename: str, rel_to: Optional[str]=None, **options) -> _T: + """Create an instance of Lark with the grammar given by its filename + + If ``rel_to`` is provided, the function will find the grammar filename in relation to it. + + Example: + + >>> Lark.open("grammar_file.lark", rel_to=__file__, parser="lalr") + Lark(...) + + """ + if rel_to: + basepath = os.path.dirname(rel_to) + grammar_filename = os.path.join(basepath, grammar_filename) + with open(grammar_filename, encoding='utf8') as f: + return cls(f, **options) + + @classmethod + def open_from_package(cls: Type[_T], package: str, grammar_path: str, search_paths: 'Sequence[str]'=[""], **options) -> _T: + """Create an instance of Lark with the grammar loaded from within the package `package`. + This allows grammar loading from zipapps. + + Imports in the grammar will use the `package` and `search_paths` provided, through `FromPackageLoader` + + Example: + + Lark.open_from_package(__name__, "example.lark", ("grammars",), parser=...) 
+ """ + package_loader = FromPackageLoader(package, search_paths) + full_path, text = package_loader(None, grammar_path) + options.setdefault('source_path', full_path) + options.setdefault('import_paths', []) + options['import_paths'].append(package_loader) + return cls(text, **options) + + def __repr__(self): + return 'Lark(open(%r), parser=%r, lexer=%r, ...)' % (self.source_path, self.options.parser, self.options.lexer) + + + def lex(self, text: str, dont_ignore: bool=False) -> Iterator[Token]: + """Only lex (and postlex) the text, without parsing it. Only relevant when lexer='basic' + + When dont_ignore=True, the lexer will return all tokens, even those marked for %ignore. + + :raises UnexpectedCharacters: In case the lexer cannot find a suitable match. + """ + lexer: Lexer + if not hasattr(self, 'lexer') or dont_ignore: + lexer = self._build_lexer(dont_ignore) + else: + lexer = self.lexer + lexer_thread = LexerThread.from_text(lexer, text) + stream = lexer_thread.lex(None) + if self.options.postlex: + return self.options.postlex.process(stream) + return stream + + def get_terminal(self, name: str) -> TerminalDef: + """Get information about a terminal""" + return self._terminals_dict[name] + + def parse_interactive(self, text: Optional[str]=None, start: Optional[str]=None) -> 'InteractiveParser': + """Start an interactive parsing session. + + Parameters: + text (str, optional): Text to be parsed. Required for ``resume_parse()``. + start (str, optional): Start symbol + + Returns: + A new InteractiveParser instance. + + See Also: ``Lark.parse()`` + """ + return self.parser.parse_interactive(text, start=start) + + def parse(self, text: str, start: Optional[str]=None, on_error: 'Optional[Callable[[UnexpectedInput], bool]]'=None) -> 'ParseTree': + """Parse the given text, according to the options provided. + + Parameters: + text (str): Text to be parsed. + start (str, optional): Required if Lark was given multiple possible start symbols (using the start option). + on_error (function, optional): if provided, will be called on UnexpectedToken error. Return true to resume parsing. + LALR only. See examples/advanced/error_handling.py for an example of how to use on_error. + + Returns: + If a transformer is supplied to ``__init__``, returns whatever is the + result of the transformation. Otherwise, returns a Tree instance. + + :raises UnexpectedInput: On a parse error, one of these sub-exceptions will rise: + ``UnexpectedCharacters``, ``UnexpectedToken``, or ``UnexpectedEOF``. + For convenience, these sub-exceptions also inherit from ``ParserError`` and ``LexerError``. 
+ + """ + return self.parser.parse(text, start=start, on_error=on_error) + + +###} diff --git a/src/poetry/core/_vendor/lark/lexer.py b/src/poetry/core/_vendor/lark/lexer.py new file mode 100644 index 0000000..5e6d6d4 --- /dev/null +++ b/src/poetry/core/_vendor/lark/lexer.py @@ -0,0 +1,603 @@ +# Lexer Implementation + +from abc import abstractmethod, ABC +import re +from contextlib import suppress +from typing import ( + TypeVar, Type, List, Dict, Iterator, Collection, Callable, Optional, FrozenSet, Any, + Pattern as REPattern, ClassVar, TYPE_CHECKING, overload +) +from types import ModuleType +import warnings +if TYPE_CHECKING: + from .common import LexerConf + +from .utils import classify, get_regexp_width, Serialize +from .exceptions import UnexpectedCharacters, LexError, UnexpectedToken +from .grammar import TOKEN_DEFAULT_PRIORITY + +###{standalone +from copy import copy + + +class Pattern(Serialize, ABC): + + value: str + flags: Collection[str] + raw: Optional[str] + type: ClassVar[str] + + def __init__(self, value: str, flags: Collection[str]=(), raw: Optional[str]=None) -> None: + self.value = value + self.flags = frozenset(flags) + self.raw = raw + + def __repr__(self): + return repr(self.to_regexp()) + + # Pattern Hashing assumes all subclasses have a different priority! + def __hash__(self): + return hash((type(self), self.value, self.flags)) + + def __eq__(self, other): + return type(self) == type(other) and self.value == other.value and self.flags == other.flags + + @abstractmethod + def to_regexp(self) -> str: + raise NotImplementedError() + + @property + @abstractmethod + def min_width(self) -> int: + raise NotImplementedError() + + @property + @abstractmethod + def max_width(self) -> int: + raise NotImplementedError() + + def _get_flags(self, value): + for f in self.flags: + value = ('(?%s:%s)' % (f, value)) + return value + + +class PatternStr(Pattern): + __serialize_fields__ = 'value', 'flags' + + type: ClassVar[str] = "str" + + def to_regexp(self) -> str: + return self._get_flags(re.escape(self.value)) + + @property + def min_width(self) -> int: + return len(self.value) + + @property + def max_width(self) -> int: + return len(self.value) + + +class PatternRE(Pattern): + __serialize_fields__ = 'value', 'flags', '_width' + + type: ClassVar[str] = "re" + + def to_regexp(self) -> str: + return self._get_flags(self.value) + + _width = None + def _get_width(self): + if self._width is None: + self._width = get_regexp_width(self.to_regexp()) + return self._width + + @property + def min_width(self) -> int: + return self._get_width()[0] + + @property + def max_width(self) -> int: + return self._get_width()[1] + + +class TerminalDef(Serialize): + __serialize_fields__ = 'name', 'pattern', 'priority' + __serialize_namespace__ = PatternStr, PatternRE + + name: str + pattern: Pattern + priority: int + + def __init__(self, name: str, pattern: Pattern, priority: int=TOKEN_DEFAULT_PRIORITY) -> None: + assert isinstance(pattern, Pattern), pattern + self.name = name + self.pattern = pattern + self.priority = priority + + def __repr__(self): + return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern) + + def user_repr(self) -> str: + if self.name.startswith('__'): # We represent a generated terminal + return self.pattern.raw or self.name + else: + return self.name + +_T = TypeVar('_T', bound="Token") + +class Token(str): + """A string with meta-information, that is produced by the lexer. 
+ + When parsing text, the resulting chunks of the input that haven't been discarded, + will end up in the tree as Token instances. The Token class inherits from Python's ``str``, + so normal string comparisons and operations will work as expected. + + Attributes: + type: Name of the token (as specified in grammar) + value: Value of the token (redundant, as ``token.value == token`` will always be true) + start_pos: The index of the token in the text + line: The line of the token in the text (starting with 1) + column: The column of the token in the text (starting with 1) + end_line: The line where the token ends + end_column: The next column after the end of the token. For example, + if the token is a single character with a column value of 4, + end_column will be 5. + end_pos: the index where the token ends (basically ``start_pos + len(token)``) + """ + __slots__ = ('type', 'start_pos', 'value', 'line', 'column', 'end_line', 'end_column', 'end_pos') + + __match_args__ = ('type', 'value') + + type: str + start_pos: Optional[int] + value: Any + line: Optional[int] + column: Optional[int] + end_line: Optional[int] + end_column: Optional[int] + end_pos: Optional[int] + + + @overload + def __new__( + cls, + type: str, + value: Any, + start_pos: Optional[int]=None, + line: Optional[int]=None, + column: Optional[int]=None, + end_line: Optional[int]=None, + end_column: Optional[int]=None, + end_pos: Optional[int]=None + ) -> 'Token': + ... + + @overload + def __new__( + cls, + type_: str, + value: Any, + start_pos: Optional[int]=None, + line: Optional[int]=None, + column: Optional[int]=None, + end_line: Optional[int]=None, + end_column: Optional[int]=None, + end_pos: Optional[int]=None + ) -> 'Token': ... + + def __new__(cls, *args, **kwargs): + if "type_" in kwargs: + warnings.warn("`type_` is deprecated use `type` instead", DeprecationWarning) + + if "type" in kwargs: + raise TypeError("Error: using both 'type' and the deprecated 'type_' as arguments.") + kwargs["type"] = kwargs.pop("type_") + + return cls._future_new(*args, **kwargs) + + + @classmethod + def _future_new(cls, type, value, start_pos=None, line=None, column=None, end_line=None, end_column=None, end_pos=None): + inst = super(Token, cls).__new__(cls, value) + + inst.type = type + inst.start_pos = start_pos + inst.value = value + inst.line = line + inst.column = column + inst.end_line = end_line + inst.end_column = end_column + inst.end_pos = end_pos + return inst + + @overload + def update(self, type: Optional[str]=None, value: Optional[Any]=None) -> 'Token': + ... + + @overload + def update(self, type_: Optional[str]=None, value: Optional[Any]=None) -> 'Token': + ... 
+ + def update(self, *args, **kwargs): + if "type_" in kwargs: + warnings.warn("`type_` is deprecated use `type` instead", DeprecationWarning) + + if "type" in kwargs: + raise TypeError("Error: using both 'type' and the deprecated 'type_' as arguments.") + kwargs["type"] = kwargs.pop("type_") + + return self._future_update(*args, **kwargs) + + def _future_update(self, type: Optional[str]=None, value: Optional[Any]=None) -> 'Token': + return Token.new_borrow_pos( + type if type is not None else self.type, + value if value is not None else self.value, + self + ) + + @classmethod + def new_borrow_pos(cls: Type[_T], type_: str, value: Any, borrow_t: 'Token') -> _T: + return cls(type_, value, borrow_t.start_pos, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column, borrow_t.end_pos) + + def __reduce__(self): + return (self.__class__, (self.type, self.value, self.start_pos, self.line, self.column)) + + def __repr__(self): + return 'Token(%r, %r)' % (self.type, self.value) + + def __deepcopy__(self, memo): + return Token(self.type, self.value, self.start_pos, self.line, self.column) + + def __eq__(self, other): + if isinstance(other, Token) and self.type != other.type: + return False + + return str.__eq__(self, other) + + __hash__ = str.__hash__ + + +class LineCounter: + __slots__ = 'char_pos', 'line', 'column', 'line_start_pos', 'newline_char' + + def __init__(self, newline_char): + self.newline_char = newline_char + self.char_pos = 0 + self.line = 1 + self.column = 1 + self.line_start_pos = 0 + + def __eq__(self, other): + if not isinstance(other, LineCounter): + return NotImplemented + + return self.char_pos == other.char_pos and self.newline_char == other.newline_char + + def feed(self, token: Token, test_newline=True): + """Consume a token and calculate the new line & column. + + As an optional optimization, set test_newline=False if token doesn't contain a newline. 
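+
+        For example, starting from line 1, column 1, feeding the token "a\nbc"
+        moves the counter to line 2, column 3 (1-based), i.e. just past "bc".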
+ """ + if test_newline: + newlines = token.count(self.newline_char) + if newlines: + self.line += newlines + self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1 + + self.char_pos += len(token) + self.column = self.char_pos - self.line_start_pos + 1 + + +class UnlessCallback: + def __init__(self, scanner): + self.scanner = scanner + + def __call__(self, t): + res = self.scanner.match(t.value, 0) + if res: + _value, t.type = res + return t + + +class CallChain: + def __init__(self, callback1, callback2, cond): + self.callback1 = callback1 + self.callback2 = callback2 + self.cond = cond + + def __call__(self, t): + t2 = self.callback1(t) + return self.callback2(t) if self.cond(t2) else t2 + + +def _get_match(re_, regexp, s, flags): + m = re_.match(regexp, s, flags) + if m: + return m.group(0) + +def _create_unless(terminals, g_regex_flags, re_, use_bytes): + tokens_by_type = classify(terminals, lambda t: type(t.pattern)) + assert len(tokens_by_type) <= 2, tokens_by_type.keys() + embedded_strs = set() + callback = {} + for retok in tokens_by_type.get(PatternRE, []): + unless = [] + for strtok in tokens_by_type.get(PatternStr, []): + if strtok.priority != retok.priority: + continue + s = strtok.pattern.value + if s == _get_match(re_, retok.pattern.to_regexp(), s, g_regex_flags): + unless.append(strtok) + if strtok.pattern.flags <= retok.pattern.flags: + embedded_strs.add(strtok) + if unless: + callback[retok.name] = UnlessCallback(Scanner(unless, g_regex_flags, re_, match_whole=True, use_bytes=use_bytes)) + + new_terminals = [t for t in terminals if t not in embedded_strs] + return new_terminals, callback + + +class Scanner: + def __init__(self, terminals, g_regex_flags, re_, use_bytes, match_whole=False): + self.terminals = terminals + self.g_regex_flags = g_regex_flags + self.re_ = re_ + self.use_bytes = use_bytes + self.match_whole = match_whole + + self.allowed_types = {t.name for t in self.terminals} + + self._mres = self._build_mres(terminals, len(terminals)) + + def _build_mres(self, terminals, max_size): + # Python sets an unreasonable group limit (currently 100) in its re module + # Worse, the only way to know we reached it is by catching an AssertionError! + # This function recursively tries less and less groups until it's successful. + postfix = '$' if self.match_whole else '' + mres = [] + while terminals: + pattern = u'|'.join(u'(?P<%s>%s)' % (t.name, t.pattern.to_regexp() + postfix) for t in terminals[:max_size]) + if self.use_bytes: + pattern = pattern.encode('latin-1') + try: + mre = self.re_.compile(pattern, self.g_regex_flags) + except AssertionError: # Yes, this is what Python provides us.. :/ + return self._build_mres(terminals, max_size//2) + + mres.append(mre) + terminals = terminals[max_size:] + return mres + + def match(self, text, pos): + for mre in self._mres: + m = mre.match(text, pos) + if m: + return m.group(0), m.lastgroup + + +def _regexp_has_newline(r: str): + r"""Expressions that may indicate newlines in a regexp: + - newlines (\n) + - escaped newline (\\n) + - anything but ([^...]) + - any-char (.) when the flag (?s) exists + - spaces (\s) + """ + return '\n' in r or '\\n' in r or '\\s' in r or '[^' in r or ('(?s' in r and '.' 
in r) + + +class LexerState: + """Represents the current state of the lexer as it scans the text + (Lexer objects are only instanciated per grammar, not per text) + """ + + __slots__ = 'text', 'line_ctr', 'last_token' + + def __init__(self, text, line_ctr=None, last_token=None): + self.text = text + self.line_ctr = line_ctr or LineCounter(b'\n' if isinstance(text, bytes) else '\n') + self.last_token = last_token + + def __eq__(self, other): + if not isinstance(other, LexerState): + return NotImplemented + + return self.text is other.text and self.line_ctr == other.line_ctr and self.last_token == other.last_token + + def __copy__(self): + return type(self)(self.text, copy(self.line_ctr), self.last_token) + + +class LexerThread: + """A thread that ties a lexer instance and a lexer state, to be used by the parser + """ + + def __init__(self, lexer: 'Lexer', lexer_state: LexerState): + self.lexer = lexer + self.state = lexer_state + + @classmethod + def from_text(cls, lexer: 'Lexer', text: str): + return cls(lexer, LexerState(text)) + + def lex(self, parser_state): + return self.lexer.lex(self.state, parser_state) + + def __copy__(self): + return type(self)(self.lexer, copy(self.state)) + + _Token = Token + + +_Callback = Callable[[Token], Token] + +class Lexer(ABC): + """Lexer interface + + Method Signatures: + lex(self, lexer_state, parser_state) -> Iterator[Token] + """ + @abstractmethod + def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]: + return NotImplemented + + def make_lexer_state(self, text): + "Deprecated" + return LexerState(text) + + +class BasicLexer(Lexer): + + terminals: Collection[TerminalDef] + ignore_types: FrozenSet[str] + newline_types: FrozenSet[str] + user_callbacks: Dict[str, _Callback] + callback: Dict[str, _Callback] + re: ModuleType + + def __init__(self, conf: 'LexerConf') -> None: + terminals = list(conf.terminals) + assert all(isinstance(t, TerminalDef) for t in terminals), terminals + + self.re = conf.re_module + + if not conf.skip_validation: + # Sanitization + for t in terminals: + try: + self.re.compile(t.pattern.to_regexp(), conf.g_regex_flags) + except self.re.error: + raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern)) + + if t.pattern.min_width == 0: + raise LexError("Lexer does not allow zero-width terminals. 
(%s: %s)" % (t.name, t.pattern)) + + if not (set(conf.ignore) <= {t.name for t in terminals}): + raise LexError("Ignore terminals are not defined: %s" % (set(conf.ignore) - {t.name for t in terminals})) + + # Init + self.newline_types = frozenset(t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp())) + self.ignore_types = frozenset(conf.ignore) + + terminals.sort(key=lambda x: (-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name)) + self.terminals = terminals + self.user_callbacks = conf.callbacks + self.g_regex_flags = conf.g_regex_flags + self.use_bytes = conf.use_bytes + self.terminals_by_name = conf.terminals_by_name + + self._scanner = None + + def _build_scanner(self): + terminals, self.callback = _create_unless(self.terminals, self.g_regex_flags, self.re, self.use_bytes) + assert all(self.callback.values()) + + for type_, f in self.user_callbacks.items(): + if type_ in self.callback: + # Already a callback there, probably UnlessCallback + self.callback[type_] = CallChain(self.callback[type_], f, lambda t: t.type == type_) + else: + self.callback[type_] = f + + self._scanner = Scanner(terminals, self.g_regex_flags, self.re, self.use_bytes) + + @property + def scanner(self): + if self._scanner is None: + self._build_scanner() + return self._scanner + + def match(self, text, pos): + return self.scanner.match(text, pos) + + def lex(self, state: LexerState, parser_state: Any) -> Iterator[Token]: + with suppress(EOFError): + while True: + yield self.next_token(state, parser_state) + + def next_token(self, lex_state: LexerState, parser_state: Any=None) -> Token: + line_ctr = lex_state.line_ctr + while line_ctr.char_pos < len(lex_state.text): + res = self.match(lex_state.text, line_ctr.char_pos) + if not res: + allowed = self.scanner.allowed_types - self.ignore_types + if not allowed: + allowed = {""} + raise UnexpectedCharacters(lex_state.text, line_ctr.char_pos, line_ctr.line, line_ctr.column, + allowed=allowed, token_history=lex_state.last_token and [lex_state.last_token], + state=parser_state, terminals_by_name=self.terminals_by_name) + + value, type_ = res + + if type_ not in self.ignore_types: + t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column) + line_ctr.feed(value, type_ in self.newline_types) + t.end_line = line_ctr.line + t.end_column = line_ctr.column + t.end_pos = line_ctr.char_pos + if t.type in self.callback: + t = self.callback[t.type](t) + if not isinstance(t, Token): + raise LexError("Callbacks must return a token (returned %r)" % t) + lex_state.last_token = t + return t + else: + if type_ in self.callback: + t2 = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column) + self.callback[type_](t2) + line_ctr.feed(value, type_ in self.newline_types) + + # EOF + raise EOFError(self) + + +class ContextualLexer(Lexer): + + lexers: Dict[str, BasicLexer] + root_lexer: BasicLexer + + def __init__(self, conf: 'LexerConf', states: Dict[str, Collection[str]], always_accept: Collection[str]=()) -> None: + terminals = list(conf.terminals) + terminals_by_name = conf.terminals_by_name + + trad_conf = copy(conf) + trad_conf.terminals = terminals + + lexer_by_tokens: Dict[FrozenSet[str], BasicLexer] = {} + self.lexers = {} + for state, accepts in states.items(): + key = frozenset(accepts) + try: + lexer = lexer_by_tokens[key] + except KeyError: + accepts = set(accepts) | set(conf.ignore) | set(always_accept) + lexer_conf = copy(trad_conf) + lexer_conf.terminals = [terminals_by_name[n] for n in accepts if n in 
terminals_by_name] + lexer = BasicLexer(lexer_conf) + lexer_by_tokens[key] = lexer + + self.lexers[state] = lexer + + assert trad_conf.terminals is terminals + self.root_lexer = BasicLexer(trad_conf) + + def lex(self, lexer_state: LexerState, parser_state: Any) -> Iterator[Token]: + try: + while True: + lexer = self.lexers[parser_state.position] + yield lexer.next_token(lexer_state, parser_state) + except EOFError: + pass + except UnexpectedCharacters as e: + # In the contextual lexer, UnexpectedCharacters can mean that the terminal is defined, but not in the current context. + # This tests the input against the global context, to provide a nicer error. + try: + last_token = lexer_state.last_token # Save last_token. Calling root_lexer.next_token will change this to the wrong token + token = self.root_lexer.next_token(lexer_state, parser_state) + raise UnexpectedToken(token, e.allowed, state=parser_state, token_history=[last_token], terminals_by_name=self.root_lexer.terminals_by_name) + except UnexpectedCharacters: + raise e # Raise the original UnexpectedCharacters. The root lexer raises it with the wrong expected set. + +###} diff --git a/src/poetry/core/_vendor/lark/load_grammar.py b/src/poetry/core/_vendor/lark/load_grammar.py new file mode 100644 index 0000000..d4f553c --- /dev/null +++ b/src/poetry/core/_vendor/lark/load_grammar.py @@ -0,0 +1,1423 @@ +"""Parses and creates Grammar objects""" +import hashlib +import os.path +import sys +from collections import namedtuple +from copy import copy, deepcopy +import pkgutil +from ast import literal_eval +from contextlib import suppress +from typing import List, Tuple, Union, Callable, Dict, Optional, Sequence + +from .utils import bfs, logger, classify_bool, is_id_continue, is_id_start, bfs_all_unique, small_factors +from .lexer import Token, TerminalDef, PatternStr, PatternRE + +from .parse_tree_builder import ParseTreeBuilder +from .parser_frontends import ParsingFrontend +from .common import LexerConf, ParserConf +from .grammar import RuleOptions, Rule, Terminal, NonTerminal, Symbol, TOKEN_DEFAULT_PRIORITY +from .utils import classify, dedup_list +from .exceptions import GrammarError, UnexpectedCharacters, UnexpectedToken, ParseError, UnexpectedInput + +from .tree import Tree, SlottedTree as ST +from .visitors import Transformer, Visitor, v_args, Transformer_InPlace, Transformer_NonRecursive +inline_args = v_args(inline=True) + +__path__ = os.path.dirname(__file__) +IMPORT_PATHS = ['grammars'] + +EXT = '.lark' + +_RE_FLAGS = 'imslux' + +_EMPTY = Symbol('__empty__') + +_TERMINAL_NAMES = { + '.' : 'DOT', + ',' : 'COMMA', + ':' : 'COLON', + ';' : 'SEMICOLON', + '+' : 'PLUS', + '-' : 'MINUS', + '*' : 'STAR', + '/' : 'SLASH', + '\\' : 'BACKSLASH', + '|' : 'VBAR', + '?' : 'QMARK', + '!' 
: 'BANG', + '@' : 'AT', + '#' : 'HASH', + '$' : 'DOLLAR', + '%' : 'PERCENT', + '^' : 'CIRCUMFLEX', + '&' : 'AMPERSAND', + '_' : 'UNDERSCORE', + '<' : 'LESSTHAN', + '>' : 'MORETHAN', + '=' : 'EQUAL', + '"' : 'DBLQUOTE', + '\'' : 'QUOTE', + '`' : 'BACKQUOTE', + '~' : 'TILDE', + '(' : 'LPAR', + ')' : 'RPAR', + '{' : 'LBRACE', + '}' : 'RBRACE', + '[' : 'LSQB', + ']' : 'RSQB', + '\n' : 'NEWLINE', + '\r\n' : 'CRLF', + '\t' : 'TAB', + ' ' : 'SPACE', +} + +# Grammar Parser +TERMINALS = { + '_LPAR': r'\(', + '_RPAR': r'\)', + '_LBRA': r'\[', + '_RBRA': r'\]', + '_LBRACE': r'\{', + '_RBRACE': r'\}', + 'OP': '[+*]|[?](?![a-z])', + '_COLON': ':', + '_COMMA': ',', + '_OR': r'\|', + '_DOT': r'\.(?!\.)', + '_DOTDOT': r'\.\.', + 'TILDE': '~', + 'RULE_MODIFIERS': '(!|![?]?|[?]!?)(?=[_a-z])', + 'RULE': '_?[a-z][_a-z0-9]*', + 'TERMINAL': '_?[A-Z][_A-Z0-9]*', + 'STRING': r'"(\\"|\\\\|[^"\n])*?"i?', + 'REGEXP': r'/(?!/)(\\/|\\\\|[^/])*?/[%s]*' % _RE_FLAGS, + '_NL': r'(\r?\n)+\s*', + '_NL_OR': r'(\r?\n)+\s*\|', + 'WS': r'[ \t]+', + 'COMMENT': r'\s*//[^\n]*', + 'BACKSLASH': r'\\[ ]*\n', + '_TO': '->', + '_IGNORE': r'%ignore', + '_OVERRIDE': r'%override', + '_DECLARE': r'%declare', + '_EXTEND': r'%extend', + '_IMPORT': r'%import', + 'NUMBER': r'[+-]?\d+', +} + +RULES = { + 'start': ['_list'], + '_list': ['_item', '_list _item'], + '_item': ['rule', 'term', 'ignore', 'import', 'declare', 'override', 'extend', '_NL'], + + 'rule': ['rule_modifiers RULE template_params priority _COLON expansions _NL'], + 'rule_modifiers': ['RULE_MODIFIERS', + ''], + 'priority': ['_DOT NUMBER', + ''], + 'template_params': ['_LBRACE _template_params _RBRACE', + ''], + '_template_params': ['RULE', + '_template_params _COMMA RULE'], + 'expansions': ['_expansions'], + '_expansions': ['alias', + '_expansions _OR alias', + '_expansions _NL_OR alias'], + + '?alias': ['expansion _TO nonterminal', 'expansion'], + 'expansion': ['_expansion'], + + '_expansion': ['', '_expansion expr'], + + '?expr': ['atom', + 'atom OP', + 'atom TILDE NUMBER', + 'atom TILDE NUMBER _DOTDOT NUMBER', + ], + + '?atom': ['_LPAR expansions _RPAR', + 'maybe', + 'value'], + + 'value': ['terminal', + 'nonterminal', + 'literal', + 'range', + 'template_usage'], + + 'terminal': ['TERMINAL'], + 'nonterminal': ['RULE'], + + '?name': ['RULE', 'TERMINAL'], + '?symbol': ['terminal', 'nonterminal'], + + 'maybe': ['_LBRA expansions _RBRA'], + 'range': ['STRING _DOTDOT STRING'], + + 'template_usage': ['nonterminal _LBRACE _template_args _RBRACE'], + '_template_args': ['value', + '_template_args _COMMA value'], + + 'term': ['TERMINAL _COLON expansions _NL', + 'TERMINAL _DOT NUMBER _COLON expansions _NL'], + 'override': ['_OVERRIDE rule', + '_OVERRIDE term'], + 'extend': ['_EXTEND rule', + '_EXTEND term'], + 'ignore': ['_IGNORE expansions _NL'], + 'declare': ['_DECLARE _declare_args _NL'], + 'import': ['_IMPORT _import_path _NL', + '_IMPORT _import_path _LPAR name_list _RPAR _NL', + '_IMPORT _import_path _TO name _NL'], + + '_import_path': ['import_lib', 'import_rel'], + 'import_lib': ['_import_args'], + 'import_rel': ['_DOT _import_args'], + '_import_args': ['name', '_import_args _DOT name'], + + 'name_list': ['_name_list'], + '_name_list': ['name', '_name_list _COMMA name'], + + '_declare_args': ['symbol', '_declare_args symbol'], + 'literal': ['REGEXP', 'STRING'], +} + + +# Value 5 keeps the number of states in the lalr parser somewhat minimal +# It isn't optimal, but close to it. 
See PR #949 +SMALL_FACTOR_THRESHOLD = 5 +# The Threshold whether repeat via ~ are split up into different rules +# 50 is chosen since it keeps the number of states low and therefore lalr analysis time low, +# while not being to overaggressive and unnecessarily creating rules that might create shift/reduce conflicts. +# (See PR #949) +REPEAT_BREAK_THRESHOLD = 50 + + +class FindRuleSize(Transformer): + def __init__(self, keep_all_tokens): + self.keep_all_tokens = keep_all_tokens + + def _will_not_get_removed(self, sym): + if isinstance(sym, NonTerminal): + return not sym.name.startswith('_') + if isinstance(sym, Terminal): + return self.keep_all_tokens or not sym.filter_out + if sym is _EMPTY: + return False + assert False, sym + + def _args_as_int(self, args): + for a in args: + if isinstance(a, int): + yield a + elif isinstance(a, Symbol): + yield 1 if self._will_not_get_removed(a) else 0 + else: + assert False + + def expansion(self, args): + return sum(self._args_as_int(args)) + + def expansions(self, args): + return max(self._args_as_int(args)) + + +@inline_args +class EBNF_to_BNF(Transformer_InPlace): + def __init__(self): + self.new_rules = [] + self.rules_cache = {} + self.prefix = 'anon' + self.i = 0 + self.rule_options = None + + def _name_rule(self, inner): + new_name = '__%s_%s_%d' % (self.prefix, inner, self.i) + self.i += 1 + return new_name + + def _add_rule(self, key, name, expansions): + t = NonTerminal(name) + self.new_rules.append((name, expansions, self.rule_options)) + self.rules_cache[key] = t + return t + + def _add_recurse_rule(self, type_, expr): + try: + return self.rules_cache[expr] + except KeyError: + new_name = self._name_rule(type_) + t = NonTerminal(new_name) + tree = ST('expansions', [ + ST('expansion', [expr]), + ST('expansion', [t, expr]) + ]) + return self._add_rule(expr, new_name, tree) + + def _add_repeat_rule(self, a, b, target, atom): + """Generate a rule that repeats target ``a`` times, and repeats atom ``b`` times. + + When called recursively (into target), it repeats atom for x(n) times, where: + x(0) = 1 + x(n) = a(n) * x(n-1) + b + + Example rule when a=3, b=4: + + new_rule: target target target atom atom atom atom + + """ + key = (a, b, target, atom) + try: + return self.rules_cache[key] + except KeyError: + new_name = self._name_rule('repeat_a%d_b%d' % (a, b)) + tree = ST('expansions', [ST('expansion', [target] * a + [atom] * b)]) + return self._add_rule(key, new_name, tree) + + def _add_repeat_opt_rule(self, a, b, target, target_opt, atom): + """Creates a rule that matches atom 0 to (a*n+b)-1 times. 
+ + When target matches n times atom, and target_opt 0 to n-1 times target_opt, + + First we generate target * i followed by target_opt, for i from 0 to a-1 + These match 0 to n*a - 1 times atom + + Then we generate target * a followed by atom * i, for i from 0 to b-1 + These match n*a to n*a + b-1 times atom + + The created rule will not have any shift/reduce conflicts so that it can be used with lalr + + Example rule when a=3, b=4: + + new_rule: target_opt + | target target_opt + | target target target_opt + + | target target target + | target target target atom + | target target target atom atom + | target target target atom atom atom + + """ + key = (a, b, target, atom, "opt") + try: + return self.rules_cache[key] + except KeyError: + new_name = self._name_rule('repeat_a%d_b%d_opt' % (a, b)) + tree = ST('expansions', [ + ST('expansion', [target]*i + [target_opt]) for i in range(a) + ] + [ + ST('expansion', [target]*a + [atom]*i) for i in range(b) + ]) + return self._add_rule(key, new_name, tree) + + def _generate_repeats(self, rule, mn, mx): + """Generates a rule tree that repeats ``rule`` exactly between ``mn`` to ``mx`` times. + """ + # For a small number of repeats, we can take the naive approach + if mx < REPEAT_BREAK_THRESHOLD: + return ST('expansions', [ST('expansion', [rule] * n) for n in range(mn, mx + 1)]) + + # For large repeat values, we break the repetition into sub-rules. + # We treat ``rule~mn..mx`` as ``rule~mn rule~0..(diff=mx-mn)``. + # We then use small_factors to split up mn and diff up into values [(a, b), ...] + # This values are used with the help of _add_repeat_rule and _add_repeat_rule_opt + # to generate a complete rule/expression that matches the corresponding number of repeats + mn_target = rule + for a, b in small_factors(mn, SMALL_FACTOR_THRESHOLD): + mn_target = self._add_repeat_rule(a, b, mn_target, rule) + if mx == mn: + return mn_target + + diff = mx - mn + 1 # We add one because _add_repeat_opt_rule generates rules that match one less + diff_factors = small_factors(diff, SMALL_FACTOR_THRESHOLD) + diff_target = rule # Match rule 1 times + diff_opt_target = ST('expansion', []) # match rule 0 times (e.g. up to 1 -1 times) + for a, b in diff_factors[:-1]: + diff_opt_target = self._add_repeat_opt_rule(a, b, diff_target, diff_opt_target, rule) + diff_target = self._add_repeat_rule(a, b, diff_target, rule) + + a, b = diff_factors[-1] + diff_opt_target = self._add_repeat_opt_rule(a, b, diff_target, diff_opt_target, rule) + + return ST('expansions', [ST('expansion', [mn_target] + [diff_opt_target])]) + + def expr(self, rule, op, *args): + if op.value == '?': + empty = ST('expansion', []) + return ST('expansions', [rule, empty]) + elif op.value == '+': + # a : b c+ d + # --> + # a : b _c d + # _c : _c c | c; + return self._add_recurse_rule('plus', rule) + elif op.value == '*': + # a : b c* d + # --> + # a : b _c? 
d + # _c : _c c | c; + new_name = self._add_recurse_rule('star', rule) + return ST('expansions', [new_name, ST('expansion', [])]) + elif op.value == '~': + if len(args) == 1: + mn = mx = int(args[0]) + else: + mn, mx = map(int, args) + if mx < mn or mn < 0: + raise GrammarError("Bad Range for %s (%d..%d isn't allowed)" % (rule, mn, mx)) + + return self._generate_repeats(rule, mn, mx) + + assert False, op + + def maybe(self, rule): + keep_all_tokens = self.rule_options and self.rule_options.keep_all_tokens + rule_size = FindRuleSize(keep_all_tokens).transform(rule) + empty = ST('expansion', [_EMPTY] * rule_size) + return ST('expansions', [rule, empty]) + + +class SimplifyRule_Visitor(Visitor): + + @staticmethod + def _flatten(tree): + while tree.expand_kids_by_data(tree.data): + pass + + def expansion(self, tree): + # rules_list unpacking + # a : b (c|d) e + # --> + # a : b c e | b d e + # + # In AST terms: + # expansion(b, expansions(c, d), e) + # --> + # expansions( expansion(b, c, e), expansion(b, d, e) ) + + self._flatten(tree) + + for i, child in enumerate(tree.children): + if isinstance(child, Tree) and child.data == 'expansions': + tree.data = 'expansions' + tree.children = [self.visit(ST('expansion', [option if i == j else other + for j, other in enumerate(tree.children)])) + for option in dedup_list(child.children)] + self._flatten(tree) + break + + def alias(self, tree): + rule, alias_name = tree.children + if rule.data == 'expansions': + aliases = [] + for child in tree.children[0].children: + aliases.append(ST('alias', [child, alias_name])) + tree.data = 'expansions' + tree.children = aliases + + def expansions(self, tree): + self._flatten(tree) + # Ensure all children are unique + if len(set(tree.children)) != len(tree.children): + tree.children = dedup_list(tree.children) # dedup is expensive, so try to minimize its use + + +class RuleTreeToText(Transformer): + def expansions(self, x): + return x + + def expansion(self, symbols): + return symbols, None + + def alias(self, x): + (expansion, _alias), alias = x + assert _alias is None, (alias, expansion, '-', _alias) # Double alias not allowed + return expansion, alias.name + + +class PrepareAnonTerminals(Transformer_InPlace): + """Create a unique list of anonymous terminals. 
Attempt to give meaningful names to them when we add them""" + + def __init__(self, terminals): + self.terminals = terminals + self.term_set = {td.name for td in self.terminals} + self.term_reverse = {td.pattern: td for td in terminals} + self.i = 0 + self.rule_options = None + + @inline_args + def pattern(self, p): + value = p.value + if p in self.term_reverse and p.flags != self.term_reverse[p].pattern.flags: + raise GrammarError(u'Conflicting flags for the same terminal: %s' % p) + + term_name = None + + if isinstance(p, PatternStr): + try: + # If already defined, use the user-defined terminal name + term_name = self.term_reverse[p].name + except KeyError: + # Try to assign an indicative anon-terminal name + try: + term_name = _TERMINAL_NAMES[value] + except KeyError: + if value and is_id_continue(value) and is_id_start(value[0]) and value.upper() not in self.term_set: + term_name = value.upper() + + if term_name in self.term_set: + term_name = None + + elif isinstance(p, PatternRE): + if p in self.term_reverse: # Kind of a weird placement.name + term_name = self.term_reverse[p].name + else: + assert False, p + + if term_name is None: + term_name = '__ANON_%d' % self.i + self.i += 1 + + if term_name not in self.term_set: + assert p not in self.term_reverse + self.term_set.add(term_name) + termdef = TerminalDef(term_name, p) + self.term_reverse[p] = termdef + self.terminals.append(termdef) + + filter_out = False if self.rule_options and self.rule_options.keep_all_tokens else isinstance(p, PatternStr) + + return Terminal(term_name, filter_out=filter_out) + + +class _ReplaceSymbols(Transformer_InPlace): + """Helper for ApplyTemplates""" + + def __init__(self): + self.names = {} + + def value(self, c): + if len(c) == 1 and isinstance(c[0], Symbol) and c[0].name in self.names: + return self.names[c[0].name] + return self.__default__('value', c, None) + + def template_usage(self, c): + name = c[0].name + if name in self.names: + return self.__default__('template_usage', [self.names[name]] + c[1:], None) + return self.__default__('template_usage', c, None) + + +class ApplyTemplates(Transformer_InPlace): + """Apply the templates, creating new rules that represent the used templates""" + + def __init__(self, rule_defs): + self.rule_defs = rule_defs + self.replacer = _ReplaceSymbols() + self.created_templates = set() + + def template_usage(self, c): + name = c[0].name + args = c[1:] + result_name = "%s{%s}" % (name, ",".join(a.name for a in args)) + if result_name not in self.created_templates: + self.created_templates.add(result_name) + (_n, params, tree, options) ,= (t for t in self.rule_defs if t[0] == name) + assert len(params) == len(args), args + result_tree = deepcopy(tree) + self.replacer.names = dict(zip(params, args)) + self.replacer.transform(result_tree) + self.rule_defs.append((result_name, [], result_tree, deepcopy(options))) + return NonTerminal(result_name) + + +def _rfind(s, choices): + return max(s.rfind(c) for c in choices) + + +def eval_escaping(s): + w = '' + i = iter(s) + for n in i: + w += n + if n == '\\': + try: + n2 = next(i) + except StopIteration: + raise GrammarError("Literal ended unexpectedly (bad escaping): `%r`" % s) + if n2 == '\\': + w += '\\\\' + elif n2 not in 'Uuxnftr': + w += '\\' + w += n2 + w = w.replace('\\"', '"').replace("'", "\\'") + + to_eval = "u'''%s'''" % w + try: + s = literal_eval(to_eval) + except SyntaxError as e: + raise GrammarError(s, e) + + return s + + +def _literal_to_pattern(literal): + assert isinstance(literal, Token) + v = 
literal.value + flag_start = _rfind(v, '/"')+1 + assert flag_start > 0 + flags = v[flag_start:] + assert all(f in _RE_FLAGS for f in flags), flags + + if literal.type == 'STRING' and '\n' in v: + raise GrammarError('You cannot put newlines in string literals') + + if literal.type == 'REGEXP' and '\n' in v and 'x' not in flags: + raise GrammarError('You can only use newlines in regular expressions ' + 'with the `x` (verbose) flag') + + v = v[:flag_start] + assert v[0] == v[-1] and v[0] in '"/' + x = v[1:-1] + + s = eval_escaping(x) + + if s == "": + raise GrammarError("Empty terminals are not allowed (%s)" % literal) + + if literal.type == 'STRING': + s = s.replace('\\\\', '\\') + return PatternStr(s, flags, raw=literal.value) + elif literal.type == 'REGEXP': + return PatternRE(s, flags, raw=literal.value) + else: + assert False, 'Invariant failed: literal.type not in ["STRING", "REGEXP"]' + + +@inline_args +class PrepareLiterals(Transformer_InPlace): + def literal(self, literal): + return ST('pattern', [_literal_to_pattern(literal)]) + + def range(self, start, end): + assert start.type == end.type == 'STRING' + start = start.value[1:-1] + end = end.value[1:-1] + assert len(eval_escaping(start)) == len(eval_escaping(end)) == 1 + regexp = '[%s-%s]' % (start, end) + return ST('pattern', [PatternRE(regexp)]) + + +def _make_joined_pattern(regexp, flags_set): + return PatternRE(regexp, ()) + +class TerminalTreeToPattern(Transformer_NonRecursive): + def pattern(self, ps): + p ,= ps + return p + + def expansion(self, items): + assert items + if len(items) == 1: + return items[0] + + pattern = ''.join(i.to_regexp() for i in items) + return _make_joined_pattern(pattern, {i.flags for i in items}) + + def expansions(self, exps): + if len(exps) == 1: + return exps[0] + + # Do a bit of sorting to make sure that the longest option is returned + # (Python's re module otherwise prefers just 'l' when given (l|ll) and both could match) + exps.sort(key=lambda x: (-x.max_width, -x.min_width, -len(x.value))) + + pattern = '(?:%s)' % ('|'.join(i.to_regexp() for i in exps)) + return _make_joined_pattern(pattern, {i.flags for i in exps}) + + def expr(self, args): + inner, op = args[:2] + if op == '~': + if len(args) == 3: + op = "{%d}" % int(args[2]) + else: + mn, mx = map(int, args[2:]) + if mx < mn: + raise GrammarError("Bad Range for %s (%d..%d isn't allowed)" % (inner, mn, mx)) + op = "{%d,%d}" % (mn, mx) + else: + assert len(args) == 2 + return PatternRE('(?:%s)%s' % (inner.to_regexp(), op), inner.flags) + + def maybe(self, expr): + return self.expr(expr + ['?']) + + def alias(self, t): + raise GrammarError("Aliasing not allowed in terminals (You used -> in the wrong place)") + + def value(self, v): + return v[0] + + +class ValidateSymbols(Transformer_InPlace): + def value(self, v): + v ,= v + assert isinstance(v, (Tree, Symbol)) + return v + + +def nr_deepcopy_tree(t): + """Deepcopy tree `t` without recursion""" + return Transformer_NonRecursive(False).transform(t) + + +class Grammar: + + term_defs: List[Tuple[str, Tuple[Tree, int]]] + rule_defs: List[Tuple[str, Tuple[str, ...], Tree, RuleOptions]] + ignore: List[str] + + def __init__(self, rule_defs: List[Tuple[str, Tuple[str, ...], Tree, RuleOptions]], term_defs: List[Tuple[str, Tuple[Tree, int]]], ignore: List[str]) -> None: + self.term_defs = term_defs + self.rule_defs = rule_defs + self.ignore = ignore + + def compile(self, start, terminals_to_keep): + # We change the trees in-place (to support huge grammars) + # So deepcopy allows calling compile more 
than once. + term_defs = [(n, (nr_deepcopy_tree(t), p)) for n, (t, p) in self.term_defs] + rule_defs = [(n, p, nr_deepcopy_tree(t), o) for n, p, t, o in self.rule_defs] + + # =================== + # Compile Terminals + # =================== + + # Convert terminal-trees to strings/regexps + + for name, (term_tree, priority) in term_defs: + if term_tree is None: # Terminal added through %declare + continue + expansions = list(term_tree.find_data('expansion')) + if len(expansions) == 1 and not expansions[0].children: + raise GrammarError("Terminals cannot be empty (%s)" % name) + + transformer = PrepareLiterals() * TerminalTreeToPattern() + terminals = [TerminalDef(name, transformer.transform(term_tree), priority) + for name, (term_tree, priority) in term_defs if term_tree] + + # ================= + # Compile Rules + # ================= + + # 1. Pre-process terminals + anon_tokens_transf = PrepareAnonTerminals(terminals) + transformer = PrepareLiterals() * ValidateSymbols() * anon_tokens_transf # Adds to terminals + + # 2. Inline Templates + + transformer *= ApplyTemplates(rule_defs) + + # 3. Convert EBNF to BNF (and apply step 1 & 2) + ebnf_to_bnf = EBNF_to_BNF() + rules = [] + i = 0 + while i < len(rule_defs): # We have to do it like this because rule_defs might grow due to templates + name, params, rule_tree, options = rule_defs[i] + i += 1 + if len(params) != 0: # Dont transform templates + continue + rule_options = RuleOptions(keep_all_tokens=True) if options and options.keep_all_tokens else None + ebnf_to_bnf.rule_options = rule_options + ebnf_to_bnf.prefix = name + anon_tokens_transf.rule_options = rule_options + tree = transformer.transform(rule_tree) + res = ebnf_to_bnf.transform(tree) + rules.append((name, res, options)) + rules += ebnf_to_bnf.new_rules + + assert len(rules) == len({name for name, _t, _o in rules}), "Whoops, name collision" + + # 4. 
Compile tree to Rule objects + rule_tree_to_text = RuleTreeToText() + + simplify_rule = SimplifyRule_Visitor() + compiled_rules = [] + for rule_content in rules: + name, tree, options = rule_content + simplify_rule.visit(tree) + expansions = rule_tree_to_text.transform(tree) + + for i, (expansion, alias) in enumerate(expansions): + if alias and name.startswith('_'): + raise GrammarError("Rule %s is marked for expansion (it starts with an underscore) and isn't allowed to have aliases (alias=%s)"% (name, alias)) + + empty_indices = [x==_EMPTY for x in expansion] + if any(empty_indices): + exp_options = copy(options) or RuleOptions() + exp_options.empty_indices = empty_indices + expansion = [x for x in expansion if x!=_EMPTY] + else: + exp_options = options + + for sym in expansion: + assert isinstance(sym, Symbol) + if sym.is_term and exp_options and exp_options.keep_all_tokens: + sym.filter_out = False + rule = Rule(NonTerminal(name), expansion, i, alias, exp_options) + compiled_rules.append(rule) + + # Remove duplicates of empty rules, throw error for non-empty duplicates + if len(set(compiled_rules)) != len(compiled_rules): + duplicates = classify(compiled_rules, lambda x: x) + for dups in duplicates.values(): + if len(dups) > 1: + if dups[0].expansion: + raise GrammarError("Rules defined twice: %s\n\n(Might happen due to colliding expansion of optionals: [] or ?)" + % ''.join('\n * %s' % i for i in dups)) + + # Empty rule; assert all other attributes are equal + assert len({(r.alias, r.order, r.options) for r in dups}) == len(dups) + + # Remove duplicates + compiled_rules = list(set(compiled_rules)) + + # Filter out unused rules + while True: + c = len(compiled_rules) + used_rules = {s for r in compiled_rules + for s in r.expansion + if isinstance(s, NonTerminal) + and s != r.origin} + used_rules |= {NonTerminal(s) for s in start} + compiled_rules, unused = classify_bool(compiled_rules, lambda r: r.origin in used_rules) + for r in unused: + logger.debug("Unused rule: %s", r) + if len(compiled_rules) == c: + break + + # Filter out unused terminals + if terminals_to_keep != '*': + used_terms = {t.name for r in compiled_rules + for t in r.expansion + if isinstance(t, Terminal)} + terminals, unused = classify_bool(terminals, lambda t: t.name in used_terms or t.name in self.ignore or t.name in terminals_to_keep) + if unused: + logger.debug("Unused terminals: %s", [t.name for t in unused]) + + return terminals, compiled_rules, self.ignore + + +PackageResource = namedtuple('PackageResource', 'pkg_name path') + + +class FromPackageLoader: + """ + Provides a simple way of creating custom import loaders that load from packages via ``pkgutil.get_data`` instead of using `open`. + This allows them to be compatible even from within zip files. + + Relative imports are handled, so you can just freely use them. + + pkg_name: The name of the package. You can probably provide `__name__` most of the time + search_paths: All the path that will be search on absolute imports. 
+ """ + + pkg_name: str + search_paths: Sequence[str] + + def __init__(self, pkg_name: str, search_paths: Sequence[str]=("", )) -> None: + self.pkg_name = pkg_name + self.search_paths = search_paths + + def __repr__(self): + return "%s(%r, %r)" % (type(self).__name__, self.pkg_name, self.search_paths) + + def __call__(self, base_path: Union[None, str, PackageResource], grammar_path: str) -> Tuple[PackageResource, str]: + if base_path is None: + to_try = self.search_paths + else: + # Check whether or not the importing grammar was loaded by this module. + if not isinstance(base_path, PackageResource) or base_path.pkg_name != self.pkg_name: + # Technically false, but FileNotFound doesn't exist in python2.7, and this message should never reach the end user anyway + raise IOError() + to_try = [base_path.path] + + err = None + for path in to_try: + full_path = os.path.join(path, grammar_path) + try: + text: Optional[bytes] = pkgutil.get_data(self.pkg_name, full_path) + except IOError as e: + err = e + continue + else: + return PackageResource(self.pkg_name, full_path), (text.decode() if text else '') + + raise IOError('Cannot find grammar in given paths') from err + + +stdlib_loader = FromPackageLoader('lark', IMPORT_PATHS) + + + +def resolve_term_references(term_dict): + # TODO Solve with transitive closure (maybe) + + while True: + changed = False + for name, token_tree in term_dict.items(): + if token_tree is None: # Terminal added through %declare + continue + for exp in token_tree.find_data('value'): + item ,= exp.children + if isinstance(item, NonTerminal): + raise GrammarError("Rules aren't allowed inside terminals (%s in %s)" % (item, name)) + elif isinstance(item, Terminal): + try: + term_value = term_dict[item.name] + except KeyError: + raise GrammarError("Terminal used but not defined: %s" % item.name) + assert term_value is not None + exp.children[0] = term_value + changed = True + else: + assert isinstance(item, Tree) + if not changed: + break + + for name, term in term_dict.items(): + if term: # Not just declared + for child in term.children: + ids = [id(x) for x in child.iter_subtrees()] + if id(term) in ids: + raise GrammarError("Recursion in terminal '%s' (recursion is only allowed in rules, not terminals)" % name) + + + +def symbol_from_strcase(s): + assert isinstance(s, str) + return Terminal(s, filter_out=s.startswith('_')) if s.isupper() else NonTerminal(s) + +@inline_args +class PrepareGrammar(Transformer_InPlace): + def terminal(self, name): + return Terminal(str(name), filter_out=name.startswith('_')) + + def nonterminal(self, name): + return NonTerminal(name.value) + + +def _find_used_symbols(tree): + assert tree.data == 'expansions' + return {t.name for x in tree.find_data('expansion') + for t in x.scan_values(lambda t: isinstance(t, Symbol))} + + +def _get_parser(): + try: + return _get_parser.cache + except AttributeError: + terminals = [TerminalDef(name, PatternRE(value)) for name, value in TERMINALS.items()] + + rules = [(name.lstrip('?'), x, RuleOptions(expand1=name.startswith('?'))) + for name, x in RULES.items()] + rules = [Rule(NonTerminal(r), [symbol_from_strcase(s) for s in x.split()], i, None, o) + for r, xs, o in rules for i, x in enumerate(xs)] + + callback = ParseTreeBuilder(rules, ST).create_callback() + import re + lexer_conf = LexerConf(terminals, re, ['WS', 'COMMENT', 'BACKSLASH']) + parser_conf = ParserConf(rules, callback, ['start']) + lexer_conf.lexer_type = 'basic' + parser_conf.parser_type = 'lalr' + _get_parser.cache = ParsingFrontend(lexer_conf, 
parser_conf, None) + return _get_parser.cache + +GRAMMAR_ERRORS = [ + ('Incorrect type of value', ['a: 1\n']), + ('Unclosed parenthesis', ['a: (\n']), + ('Unmatched closing parenthesis', ['a: )\n', 'a: [)\n', 'a: (]\n']), + ('Expecting rule or terminal definition (missing colon)', ['a\n', 'A\n', 'a->\n', 'A->\n', 'a A\n']), + ('Illegal name for rules or terminals', ['Aa:\n']), + ('Alias expects lowercase name', ['a: -> "a"\n']), + ('Unexpected colon', ['a::\n', 'a: b:\n', 'a: B:\n', 'a: "a":\n']), + ('Misplaced operator', ['a: b??', 'a: b(?)', 'a:+\n', 'a:?\n', 'a:*\n', 'a:|*\n']), + ('Expecting option ("|") or a new rule or terminal definition', ['a:a\n()\n']), + ('Terminal names cannot contain dots', ['A.B\n']), + ('Expecting rule or terminal definition', ['"a"\n']), + ('%import expects a name', ['%import "a"\n']), + ('%ignore expects a value', ['%ignore %import\n']), + ] + +def _translate_parser_exception(parse, e): + error = e.match_examples(parse, GRAMMAR_ERRORS, use_accepts=True) + if error: + return error + elif 'STRING' in e.expected: + return "Expecting a value" + +def _parse_grammar(text, name, start='start'): + try: + tree = _get_parser().parse(text + '\n', start) + except UnexpectedCharacters as e: + context = e.get_context(text) + raise GrammarError("Unexpected input at line %d column %d in %s: \n\n%s" % + (e.line, e.column, name, context)) + except UnexpectedToken as e: + context = e.get_context(text) + error = _translate_parser_exception(_get_parser().parse, e) + if error: + raise GrammarError("%s, at line %s column %s\n\n%s" % (error, e.line, e.column, context)) + raise + + return PrepareGrammar().transform(tree) + + +def _error_repr(error): + if isinstance(error, UnexpectedToken): + error2 = _translate_parser_exception(_get_parser().parse, error) + if error2: + return error2 + expected = ', '.join(error.accepts or error.expected) + return "Unexpected token %r. Expected one of: {%s}" % (str(error.token), expected) + else: + return str(error) + +def _search_interactive_parser(interactive_parser, predicate): + def expand(node): + path, p = node + for choice in p.choices(): + t = Token(choice, '') + try: + new_p = p.feed_token(t) + except ParseError: # Illegal + pass + else: + yield path + (choice,), new_p + + for path, p in bfs_all_unique([((), interactive_parser)], expand): + if predicate(p): + return path, p + +def find_grammar_errors(text: str, start: str='start') -> List[Tuple[UnexpectedInput, str]]: + errors = [] + def on_error(e): + errors.append((e, _error_repr(e))) + + # recover to a new line + token_path, _ = _search_interactive_parser(e.interactive_parser.as_immutable(), lambda p: '_NL' in p.choices()) + for token_type in token_path: + e.interactive_parser.feed_token(Token(token_type, '')) + e.interactive_parser.feed_token(Token('_NL', '\n')) + return True + + _tree = _get_parser().parse(text + '\n', start, on_error=on_error) + + errors_by_line = classify(errors, lambda e: e[0].line) + errors = [el[0] for el in errors_by_line.values()] # already sorted + + for e in errors: + e[0].interactive_parser = None + return errors + + +def _get_mangle(prefix, aliases, base_mangle=None): + def mangle(s): + if s in aliases: + s = aliases[s] + else: + if s[0] == '_': + s = '_%s__%s' % (prefix, s[1:]) + else: + s = '%s__%s' % (prefix, s) + if base_mangle is not None: + s = base_mangle(s) + return s + return mangle + +def _mangle_definition_tree(exp, mangle): + if mangle is None: + return exp + exp = deepcopy(exp) # TODO: is this needed? 
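+    # Note (added comment, not in upstream lark): the loop below renames every Symbol in the
+    # copied definition tree via the mangle function, so rules/terminals pulled in by %import
+    # receive their namespaced (prefix__name) names without mutating the original tree.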
+ for t in exp.iter_subtrees(): + for i, c in enumerate(t.children): + if isinstance(c, Symbol): + t.children[i] = c.renamed(mangle) + + return exp + +def _make_rule_tuple(modifiers_tree, name, params, priority_tree, expansions): + if modifiers_tree.children: + m ,= modifiers_tree.children + expand1 = '?' in m + if expand1 and name.startswith('_'): + raise GrammarError("Inlined rules (_rule) cannot use the ?rule modifier.") + keep_all_tokens = '!' in m + else: + keep_all_tokens = False + expand1 = False + + if priority_tree.children: + p ,= priority_tree.children + priority = int(p) + else: + priority = None + + if params is not None: + params = [t.value for t in params.children] # For the grammar parser + + return name, params, expansions, RuleOptions(keep_all_tokens, expand1, priority=priority, + template_source=(name if params else None)) + + +class Definition: + def __init__(self, is_term, tree, params=(), options=None): + self.is_term = is_term + self.tree = tree + self.params = tuple(params) + self.options = options + +class GrammarBuilder: + + global_keep_all_tokens: bool + import_paths: List[Union[str, Callable]] + used_files: Dict[str, str] + + _definitions: Dict[str, Definition] + _ignore_names: List[str] + + def __init__(self, global_keep_all_tokens: bool=False, import_paths: Optional[List[Union[str, Callable]]]=None, used_files: Optional[Dict[str, str]]=None) -> None: + self.global_keep_all_tokens = global_keep_all_tokens + self.import_paths = import_paths or [] + self.used_files = used_files or {} + + self._definitions: Dict[str, Definition] = {} + self._ignore_names: List[str] = [] + + def _grammar_error(self, is_term, msg, *names): + args = {} + for i, name in enumerate(names, start=1): + postfix = '' if i == 1 else str(i) + args['name' + postfix] = name + args['type' + postfix] = lowercase_type = ("rule", "terminal")[is_term] + args['Type' + postfix] = lowercase_type.title() + raise GrammarError(msg.format(**args)) + + def _check_options(self, is_term, options): + if is_term: + if options is None: + options = 1 + elif not isinstance(options, int): + raise GrammarError("Terminal require a single int as 'options' (e.g. 
priority), got %s" % (type(options),)) + else: + if options is None: + options = RuleOptions() + elif not isinstance(options, RuleOptions): + raise GrammarError("Rules require a RuleOptions instance as 'options'") + if self.global_keep_all_tokens: + options.keep_all_tokens = True + return options + + + def _define(self, name, is_term, exp, params=(), options=None, *, override=False): + if name in self._definitions: + if not override: + self._grammar_error(is_term, "{Type} '{name}' defined more than once", name) + elif override: + self._grammar_error(is_term, "Cannot override a nonexisting {type} {name}", name) + + if name.startswith('__'): + self._grammar_error(is_term, 'Names starting with double-underscore are reserved (Error at {name})', name) + + self._definitions[name] = Definition(is_term, exp, params, self._check_options(is_term, options)) + + def _extend(self, name, is_term, exp, params=(), options=None): + if name not in self._definitions: + self._grammar_error(is_term, "Can't extend {type} {name} as it wasn't defined before", name) + + d = self._definitions[name] + + if is_term != d.is_term: + self._grammar_error(is_term, "Cannot extend {type} {name} - one is a terminal, while the other is not.", name) + if tuple(params) != d.params: + self._grammar_error(is_term, "Cannot extend {type} with different parameters: {name}", name) + + if d.tree is None: + self._grammar_error(is_term, "Can't extend {type} {name} - it is abstract.", name) + + # TODO: think about what to do with 'options' + base = d.tree + + assert isinstance(base, Tree) and base.data == 'expansions' + base.children.insert(0, exp) + + def _ignore(self, exp_or_name): + if isinstance(exp_or_name, str): + self._ignore_names.append(exp_or_name) + else: + assert isinstance(exp_or_name, Tree) + t = exp_or_name + if t.data == 'expansions' and len(t.children) == 1: + t2 ,= t.children + if t2.data=='expansion' and len(t2.children) == 1: + item ,= t2.children + if item.data == 'value': + item ,= item.children + if isinstance(item, Terminal): + # Keep terminal name, no need to create a new definition + self._ignore_names.append(item.name) + return + + name = '__IGNORE_%d'% len(self._ignore_names) + self._ignore_names.append(name) + self._definitions[name] = Definition(True, t, options=TOKEN_DEFAULT_PRIORITY) + + def _unpack_import(self, stmt, grammar_name): + if len(stmt.children) > 1: + path_node, arg1 = stmt.children + else: + path_node, = stmt.children + arg1 = None + + if isinstance(arg1, Tree): # Multi import + dotted_path = tuple(path_node.children) + names = arg1.children + aliases = dict(zip(names, names)) # Can't have aliased multi import, so all aliases will be the same as names + else: # Single import + dotted_path = tuple(path_node.children[:-1]) + if not dotted_path: + name ,= path_node.children + raise GrammarError("Nothing was imported from grammar `%s`" % name) + name = path_node.children[-1] # Get name from dotted path + aliases = {name.value: (arg1 or name).value} # Aliases if exist + + if path_node.data == 'import_lib': # Import from library + base_path = None + else: # Relative import + if grammar_name == '': # Import relative to script file path if grammar is coded in script + try: + base_file = os.path.abspath(sys.modules['__main__'].__file__) + except AttributeError: + base_file = None + else: + base_file = grammar_name # Import relative to grammar file path if external grammar file + if base_file: + if isinstance(base_file, PackageResource): + base_path = PackageResource(base_file.pkg_name, 
os.path.split(base_file.path)[0]) + else: + base_path = os.path.split(base_file)[0] + else: + base_path = os.path.abspath(os.path.curdir) + + return dotted_path, base_path, aliases + + def _unpack_definition(self, tree, mangle): + + if tree.data == 'rule': + name, params, exp, opts = _make_rule_tuple(*tree.children) + is_term = False + else: + name = tree.children[0].value + params = () # TODO terminal templates + opts = int(tree.children[1]) if len(tree.children) == 3 else TOKEN_DEFAULT_PRIORITY # priority + exp = tree.children[-1] + is_term = True + + if mangle is not None: + params = tuple(mangle(p) for p in params) + name = mangle(name) + + exp = _mangle_definition_tree(exp, mangle) + return name, is_term, exp, params, opts + + + def load_grammar(self, grammar_text: str, grammar_name: str="", mangle: Optional[Callable[[str], str]]=None) -> None: + tree = _parse_grammar(grammar_text, grammar_name) + + imports: Dict[Tuple[str, ...], Tuple[Optional[str], Dict[str, str]]] = {} + + for stmt in tree.children: + if stmt.data == 'import': + dotted_path, base_path, aliases = self._unpack_import(stmt, grammar_name) + try: + import_base_path, import_aliases = imports[dotted_path] + assert base_path == import_base_path, 'Inconsistent base_path for %s.' % '.'.join(dotted_path) + import_aliases.update(aliases) + except KeyError: + imports[dotted_path] = base_path, aliases + + for dotted_path, (base_path, aliases) in imports.items(): + self.do_import(dotted_path, base_path, aliases, mangle) + + for stmt in tree.children: + if stmt.data in ('term', 'rule'): + self._define(*self._unpack_definition(stmt, mangle)) + elif stmt.data == 'override': + r ,= stmt.children + self._define(*self._unpack_definition(r, mangle), override=True) + elif stmt.data == 'extend': + r ,= stmt.children + self._extend(*self._unpack_definition(r, mangle)) + elif stmt.data == 'ignore': + # if mangle is not None, we shouldn't apply ignore, since we aren't in a toplevel grammar + if mangle is None: + self._ignore(*stmt.children) + elif stmt.data == 'declare': + for symbol in stmt.children: + assert isinstance(symbol, Symbol), symbol + is_term = isinstance(symbol, Terminal) + if mangle is None: + name = symbol.name + else: + name = mangle(symbol.name) + self._define(name, is_term, None) + elif stmt.data == 'import': + pass + else: + assert False, stmt + + + term_defs = { name: d.tree + for name, d in self._definitions.items() + if d.is_term + } + resolve_term_references(term_defs) + + + def _remove_unused(self, used): + def rule_dependencies(symbol): + try: + d = self._definitions[symbol] + except KeyError: + return [] + if d.is_term: + return [] + return _find_used_symbols(d.tree) - set(d.params) + + _used = set(bfs(used, rule_dependencies)) + self._definitions = {k: v for k, v in self._definitions.items() if k in _used} + + + def do_import(self, dotted_path: Tuple[str, ...], base_path: Optional[str], aliases: Dict[str, str], base_mangle: Optional[Callable[[str], str]]=None) -> None: + assert dotted_path + mangle = _get_mangle('__'.join(dotted_path), aliases, base_mangle) + grammar_path = os.path.join(*dotted_path) + EXT + to_try = self.import_paths + ([base_path] if base_path is not None else []) + [stdlib_loader] + for source in to_try: + try: + if callable(source): + joined_path, text = source(base_path, grammar_path) + else: + joined_path = os.path.join(source, grammar_path) + with open(joined_path, encoding='utf8') as f: + text = f.read() + except IOError: + continue + else: + h = md5_digest(text) + if 
self.used_files.get(joined_path, h) != h: + raise RuntimeError("Grammar file was changed during importing") + self.used_files[joined_path] = h + + gb = GrammarBuilder(self.global_keep_all_tokens, self.import_paths, self.used_files) + gb.load_grammar(text, joined_path, mangle) + gb._remove_unused(map(mangle, aliases)) + for name in gb._definitions: + if name in self._definitions: + raise GrammarError("Cannot import '%s' from '%s': Symbol already defined." % (name, grammar_path)) + + self._definitions.update(**gb._definitions) + break + else: + # Search failed. Make Python throw a nice error. + open(grammar_path, encoding='utf8') + assert False, "Couldn't import grammar %s, but a corresponding file was found at a place where lark doesn't search for it" % (dotted_path,) + + + def validate(self) -> None: + for name, d in self._definitions.items(): + params = d.params + exp = d.tree + + for i, p in enumerate(params): + if p in self._definitions: + raise GrammarError("Template Parameter conflicts with rule %s (in template %s)" % (p, name)) + if p in params[:i]: + raise GrammarError("Duplicate Template Parameter %s (in template %s)" % (p, name)) + + if exp is None: # Remaining checks don't apply to abstract rules/terminals (created with %declare) + continue + + for temp in exp.find_data('template_usage'): + sym = temp.children[0].name + args = temp.children[1:] + if sym not in params: + if sym not in self._definitions: + self._grammar_error(d.is_term, "Template '%s' used but not defined (in {type} {name})" % sym, name) + if len(args) != len(self._definitions[sym].params): + expected, actual = len(self._definitions[sym].params), len(args) + self._grammar_error(d.is_term, "Wrong number of template arguments used for {name} " + "(expected %s, got %s) (in {type2} {name2})" % (expected, actual), sym, name) + + for sym in _find_used_symbols(exp): + if sym not in self._definitions and sym not in params: + self._grammar_error(d.is_term, "{Type} '{name}' used but not defined (in {type2} {name2})", sym, name) + + if not set(self._definitions).issuperset(self._ignore_names): + raise GrammarError("Terminals %s were marked to ignore but were not defined!" % (set(self._ignore_names) - set(self._definitions))) + + def build(self) -> Grammar: + self.validate() + rule_defs = [] + term_defs = [] + for name, d in self._definitions.items(): + (params, exp, options) = d.params, d.tree, d.options + if d.is_term: + assert len(params) == 0 + term_defs.append((name, (exp, options))) + else: + rule_defs.append((name, params, exp, options)) + # resolve_term_references(term_defs) + return Grammar(rule_defs, term_defs, self._ignore_names) + + +def verify_used_files(file_hashes): + for path, old in file_hashes.items(): + text = None + if isinstance(path, str) and os.path.exists(path): + with open(path, encoding='utf8') as f: + text = f.read() + elif isinstance(path, PackageResource): + with suppress(IOError): + text = pkgutil.get_data(*path).decode('utf-8') + if text is None: # We don't know how to load the path. ignore it. 
+ continue + + current = md5_digest(text) + if old != current: + logger.info("File %r changed, rebuilding Parser" % path) + return False + return True + +def list_grammar_imports(grammar, import_paths=[]): + "Returns a list of paths to the lark grammars imported by the given grammar (recursively)" + builder = GrammarBuilder(False, import_paths) + builder.load_grammar(grammar, '') + return list(builder.used_files.keys()) + +def load_grammar(grammar, source, import_paths, global_keep_all_tokens): + builder = GrammarBuilder(global_keep_all_tokens, import_paths) + builder.load_grammar(grammar, source) + return builder.build(), builder.used_files + + +def md5_digest(s: str) -> str: + """Get the md5 digest of a string + + Supports the `usedforsecurity` argument for Python 3.9+ to allow running on + a FIPS-enabled system. + """ + if sys.version_info >= (3, 9): + return hashlib.md5(s.encode('utf8'), usedforsecurity=False).hexdigest() + else: + return hashlib.md5(s.encode('utf8')).hexdigest() diff --git a/src/poetry/core/_vendor/lark/parse_tree_builder.py b/src/poetry/core/_vendor/lark/parse_tree_builder.py new file mode 100644 index 0000000..a6003a9 --- /dev/null +++ b/src/poetry/core/_vendor/lark/parse_tree_builder.py @@ -0,0 +1,387 @@ +from typing import List + +from .exceptions import GrammarError, ConfigurationError +from .lexer import Token +from .tree import Tree +from .visitors import Transformer_InPlace +from .visitors import _vargs_meta, _vargs_meta_inline + +###{standalone +from functools import partial, wraps +from itertools import repeat, product + + +class ExpandSingleChild: + def __init__(self, node_builder): + self.node_builder = node_builder + + def __call__(self, children): + if len(children) == 1: + return children[0] + else: + return self.node_builder(children) + + + +class PropagatePositions: + def __init__(self, node_builder, node_filter=None): + self.node_builder = node_builder + self.node_filter = node_filter + + def __call__(self, children): + res = self.node_builder(children) + + if isinstance(res, Tree): + # Calculate positions while the tree is streaming, according to the rule: + # - nodes start at the start of their first child's container, + # and end at the end of their last child's container. + # Containers are nodes that take up space in text, but have been inlined in the tree. + + res_meta = res.meta + + first_meta = self._pp_get_meta(children) + if first_meta is not None: + if not hasattr(res_meta, 'line'): + # meta was already set, probably because the rule has been inlined (e.g. 
`?rule`) + res_meta.line = getattr(first_meta, 'container_line', first_meta.line) + res_meta.column = getattr(first_meta, 'container_column', first_meta.column) + res_meta.start_pos = getattr(first_meta, 'container_start_pos', first_meta.start_pos) + res_meta.empty = False + + res_meta.container_line = getattr(first_meta, 'container_line', first_meta.line) + res_meta.container_column = getattr(first_meta, 'container_column', first_meta.column) + + last_meta = self._pp_get_meta(reversed(children)) + if last_meta is not None: + if not hasattr(res_meta, 'end_line'): + res_meta.end_line = getattr(last_meta, 'container_end_line', last_meta.end_line) + res_meta.end_column = getattr(last_meta, 'container_end_column', last_meta.end_column) + res_meta.end_pos = getattr(last_meta, 'container_end_pos', last_meta.end_pos) + res_meta.empty = False + + res_meta.container_end_line = getattr(last_meta, 'container_end_line', last_meta.end_line) + res_meta.container_end_column = getattr(last_meta, 'container_end_column', last_meta.end_column) + + return res + + def _pp_get_meta(self, children): + for c in children: + if self.node_filter is not None and not self.node_filter(c): + continue + if isinstance(c, Tree): + if not c.meta.empty: + return c.meta + elif isinstance(c, Token): + return c + elif hasattr(c, '__lark_meta__'): + return c.__lark_meta__() + +def make_propagate_positions(option): + if callable(option): + return partial(PropagatePositions, node_filter=option) + elif option is True: + return PropagatePositions + elif option is False: + return None + + raise ConfigurationError('Invalid option for propagate_positions: %r' % option) + + +class ChildFilter: + def __init__(self, to_include, append_none, node_builder): + self.node_builder = node_builder + self.to_include = to_include + self.append_none = append_none + + def __call__(self, children): + filtered = [] + + for i, to_expand, add_none in self.to_include: + if add_none: + filtered += [None] * add_none + if to_expand: + filtered += children[i].children + else: + filtered.append(children[i]) + + if self.append_none: + filtered += [None] * self.append_none + + return self.node_builder(filtered) + + +class ChildFilterLALR(ChildFilter): + """Optimized childfilter for LALR (assumes no duplication in parse tree, so it's safe to change it)""" + + def __call__(self, children): + filtered = [] + for i, to_expand, add_none in self.to_include: + if add_none: + filtered += [None] * add_none + if to_expand: + if filtered: + filtered += children[i].children + else: # Optimize for left-recursion + filtered = children[i].children + else: + filtered.append(children[i]) + + if self.append_none: + filtered += [None] * self.append_none + + return self.node_builder(filtered) + + +class ChildFilterLALR_NoPlaceholders(ChildFilter): + "Optimized childfilter for LALR (assumes no duplication in parse tree, so it's safe to change it)" + def __init__(self, to_include, node_builder): + self.node_builder = node_builder + self.to_include = to_include + + def __call__(self, children): + filtered = [] + for i, to_expand in self.to_include: + if to_expand: + if filtered: + filtered += children[i].children + else: # Optimize for left-recursion + filtered = children[i].children + else: + filtered.append(children[i]) + return self.node_builder(filtered) + + +def _should_expand(sym): + return not sym.is_term and sym.name.startswith('_') + + +def maybe_create_child_filter(expansion, keep_all_tokens, ambiguous, _empty_indices: List[bool]): + # Prepare empty_indices as: How many 
Nones to insert at each index? + if _empty_indices: + assert _empty_indices.count(False) == len(expansion) + s = ''.join(str(int(b)) for b in _empty_indices) + empty_indices = [len(ones) for ones in s.split('0')] + assert len(empty_indices) == len(expansion)+1, (empty_indices, len(expansion)) + else: + empty_indices = [0] * (len(expansion)+1) + + to_include = [] + nones_to_add = 0 + for i, sym in enumerate(expansion): + nones_to_add += empty_indices[i] + if keep_all_tokens or not (sym.is_term and sym.filter_out): + to_include.append((i, _should_expand(sym), nones_to_add)) + nones_to_add = 0 + + nones_to_add += empty_indices[len(expansion)] + + if _empty_indices or len(to_include) < len(expansion) or any(to_expand for i, to_expand,_ in to_include): + if _empty_indices or ambiguous: + return partial(ChildFilter if ambiguous else ChildFilterLALR, to_include, nones_to_add) + else: + # LALR without placeholders + return partial(ChildFilterLALR_NoPlaceholders, [(i, x) for i,x,_ in to_include]) + + +class AmbiguousExpander: + """Deal with the case where we're expanding children ('_rule') into a parent but the children + are ambiguous. i.e. (parent->_ambig->_expand_this_rule). In this case, make the parent itself + ambiguous with as many copies as their are ambiguous children, and then copy the ambiguous children + into the right parents in the right places, essentially shifting the ambiguity up the tree.""" + def __init__(self, to_expand, tree_class, node_builder): + self.node_builder = node_builder + self.tree_class = tree_class + self.to_expand = to_expand + + def __call__(self, children): + def _is_ambig_tree(t): + return hasattr(t, 'data') and t.data == '_ambig' + + # -- When we're repeatedly expanding ambiguities we can end up with nested ambiguities. + # All children of an _ambig node should be a derivation of that ambig node, hence + # it is safe to assume that if we see an _ambig node nested within an ambig node + # it is safe to simply expand it into the parent _ambig node as an alternative derivation. + ambiguous = [] + for i, child in enumerate(children): + if _is_ambig_tree(child): + if i in self.to_expand: + ambiguous.append(i) + + child.expand_kids_by_data('_ambig') + + if not ambiguous: + return self.node_builder(children) + + expand = [iter(child.children) if i in ambiguous else repeat(child) for i, child in enumerate(children)] + return self.tree_class('_ambig', [self.node_builder(list(f[0])) for f in product(zip(*expand))]) + + +def maybe_create_ambiguous_expander(tree_class, expansion, keep_all_tokens): + to_expand = [i for i, sym in enumerate(expansion) + if keep_all_tokens or ((not (sym.is_term and sym.filter_out)) and _should_expand(sym))] + if to_expand: + return partial(AmbiguousExpander, to_expand, tree_class) + + +class AmbiguousIntermediateExpander: + """ + Propagate ambiguous intermediate nodes and their derivations up to the + current rule. + + In general, converts + + rule + _iambig + _inter + someChildren1 + ... + _inter + someChildren2 + ... + someChildren3 + ... + + to + + _ambig + rule + someChildren1 + ... + someChildren3 + ... + rule + someChildren2 + ... + someChildren3 + ... + rule + childrenFromNestedIambigs + ... + someChildren3 + ... + ... + + propagating up any nested '_iambig' nodes along the way. 
+ """ + + def __init__(self, tree_class, node_builder): + self.node_builder = node_builder + self.tree_class = tree_class + + def __call__(self, children): + def _is_iambig_tree(child): + return hasattr(child, 'data') and child.data == '_iambig' + + def _collapse_iambig(children): + """ + Recursively flatten the derivations of the parent of an '_iambig' + node. Returns a list of '_inter' nodes guaranteed not + to contain any nested '_iambig' nodes, or None if children does + not contain an '_iambig' node. + """ + + # Due to the structure of the SPPF, + # an '_iambig' node can only appear as the first child + if children and _is_iambig_tree(children[0]): + iambig_node = children[0] + result = [] + for grandchild in iambig_node.children: + collapsed = _collapse_iambig(grandchild.children) + if collapsed: + for child in collapsed: + child.children += children[1:] + result += collapsed + else: + new_tree = self.tree_class('_inter', grandchild.children + children[1:]) + result.append(new_tree) + return result + + collapsed = _collapse_iambig(children) + if collapsed: + processed_nodes = [self.node_builder(c.children) for c in collapsed] + return self.tree_class('_ambig', processed_nodes) + + return self.node_builder(children) + + + +def inplace_transformer(func): + @wraps(func) + def f(children): + # function name in a Transformer is a rule name. + tree = Tree(func.__name__, children) + return func(tree) + return f + + +def apply_visit_wrapper(func, name, wrapper): + if wrapper is _vargs_meta or wrapper is _vargs_meta_inline: + raise NotImplementedError("Meta args not supported for internal transformer") + + @wraps(func) + def f(children): + return wrapper(func, name, children, None) + return f + + +class ParseTreeBuilder: + def __init__(self, rules, tree_class, propagate_positions=False, ambiguous=False, maybe_placeholders=False): + self.tree_class = tree_class + self.propagate_positions = propagate_positions + self.ambiguous = ambiguous + self.maybe_placeholders = maybe_placeholders + + self.rule_builders = list(self._init_builders(rules)) + + def _init_builders(self, rules): + propagate_positions = make_propagate_positions(self.propagate_positions) + + for rule in rules: + options = rule.options + keep_all_tokens = options.keep_all_tokens + expand_single_child = options.expand1 + + wrapper_chain = list(filter(None, [ + (expand_single_child and not rule.alias) and ExpandSingleChild, + maybe_create_child_filter(rule.expansion, keep_all_tokens, self.ambiguous, options.empty_indices if self.maybe_placeholders else None), + propagate_positions, + self.ambiguous and maybe_create_ambiguous_expander(self.tree_class, rule.expansion, keep_all_tokens), + self.ambiguous and partial(AmbiguousIntermediateExpander, self.tree_class) + ])) + + yield rule, wrapper_chain + + def create_callback(self, transformer=None): + callbacks = {} + + default_handler = getattr(transformer, '__default__', None) + if default_handler: + def default_callback(data, children): + return default_handler(data, children, None) + else: + default_callback = self.tree_class + + for rule, wrapper_chain in self.rule_builders: + + user_callback_name = rule.alias or rule.options.template_source or rule.origin.name + try: + f = getattr(transformer, user_callback_name) + wrapper = getattr(f, 'visit_wrapper', None) + if wrapper is not None: + f = apply_visit_wrapper(f, user_callback_name, wrapper) + elif isinstance(transformer, Transformer_InPlace): + f = inplace_transformer(f) + except AttributeError: + f = partial(default_callback, 
user_callback_name) + + for w in wrapper_chain: + f = w(f) + + if rule in callbacks: + raise GrammarError("Rule '%s' already exists" % (rule,)) + + callbacks[rule] = f + + return callbacks + +###} diff --git a/src/poetry/core/_vendor/lark/parser_frontends.py b/src/poetry/core/_vendor/lark/parser_frontends.py new file mode 100644 index 0000000..4e28e36 --- /dev/null +++ b/src/poetry/core/_vendor/lark/parser_frontends.py @@ -0,0 +1,245 @@ +from typing import Any, Callable, Dict, Tuple + +from .exceptions import ConfigurationError, GrammarError, assert_config +from .utils import get_regexp_width, Serialize +from .parsers.grammar_analysis import GrammarAnalyzer +from .lexer import LexerThread, BasicLexer, ContextualLexer, Lexer +from .parsers import earley, xearley, cyk +from .parsers.lalr_parser import LALR_Parser +from .tree import Tree +from .common import LexerConf, ParserConf, _ParserArgType, _LexerArgType + +###{standalone + +def _wrap_lexer(lexer_class): + future_interface = getattr(lexer_class, '__future_interface__', False) + if future_interface: + return lexer_class + else: + class CustomLexerWrapper(Lexer): + def __init__(self, lexer_conf): + self.lexer = lexer_class(lexer_conf) + def lex(self, lexer_state, parser_state): + return self.lexer.lex(lexer_state.text) + return CustomLexerWrapper + + +def _deserialize_parsing_frontend(data, memo, lexer_conf, callbacks, options): + parser_conf = ParserConf.deserialize(data['parser_conf'], memo) + cls = (options and options._plugins.get('LALR_Parser')) or LALR_Parser + parser = cls.deserialize(data['parser'], memo, callbacks, options.debug) + parser_conf.callbacks = callbacks + return ParsingFrontend(lexer_conf, parser_conf, options, parser=parser) + + +_parser_creators: 'Dict[str, Callable[[LexerConf, Any, Any], Any]]' = {} + + +class ParsingFrontend(Serialize): + __serialize_fields__ = 'lexer_conf', 'parser_conf', 'parser' + + def __init__(self, lexer_conf, parser_conf, options, parser=None): + self.parser_conf = parser_conf + self.lexer_conf = lexer_conf + self.options = options + + # Set-up parser + if parser: # From cache + self.parser = parser + else: + create_parser = _parser_creators.get(parser_conf.parser_type) + assert create_parser is not None, "{} is not supported in standalone mode".format( + parser_conf.parser_type + ) + self.parser = create_parser(lexer_conf, parser_conf, options) + + # Set-up lexer + lexer_type = lexer_conf.lexer_type + self.skip_lexer = False + if lexer_type in ('dynamic', 'dynamic_complete'): + assert lexer_conf.postlex is None + self.skip_lexer = True + return + + try: + create_lexer = { + 'basic': create_basic_lexer, + 'contextual': create_contextual_lexer, + }[lexer_type] + except KeyError: + assert issubclass(lexer_type, Lexer), lexer_type + self.lexer = _wrap_lexer(lexer_type)(lexer_conf) + else: + self.lexer = create_lexer(lexer_conf, self.parser, lexer_conf.postlex, options) + + if lexer_conf.postlex: + self.lexer = PostLexConnector(self.lexer, lexer_conf.postlex) + + def _verify_start(self, start=None): + if start is None: + start_decls = self.parser_conf.start + if len(start_decls) > 1: + raise ConfigurationError("Lark initialized with more than 1 possible start rule. Must specify which start rule to parse", start_decls) + start ,= start_decls + elif start not in self.parser_conf.start: + raise ConfigurationError("Unknown start rule %s. 
Must be one of %r" % (start, self.parser_conf.start)) + return start + + def _make_lexer_thread(self, text): + cls = (self.options and self.options._plugins.get('LexerThread')) or LexerThread + return text if self.skip_lexer else cls.from_text(self.lexer, text) + + def parse(self, text, start=None, on_error=None): + chosen_start = self._verify_start(start) + kw = {} if on_error is None else {'on_error': on_error} + stream = self._make_lexer_thread(text) + return self.parser.parse(stream, chosen_start, **kw) + + def parse_interactive(self, text=None, start=None): + chosen_start = self._verify_start(start) + if self.parser_conf.parser_type != 'lalr': + raise ConfigurationError("parse_interactive() currently only works with parser='lalr' ") + stream = self._make_lexer_thread(text) + return self.parser.parse_interactive(stream, chosen_start) + + +def _validate_frontend_args(parser, lexer) -> None: + assert_config(parser, ('lalr', 'earley', 'cyk')) + if not isinstance(lexer, type): # not custom lexer? + expected = { + 'lalr': ('basic', 'contextual'), + 'earley': ('basic', 'dynamic', 'dynamic_complete'), + 'cyk': ('basic', ), + }[parser] + assert_config(lexer, expected, 'Parser %r does not support lexer %%r, expected one of %%s' % parser) + + +def _get_lexer_callbacks(transformer, terminals): + result = {} + for terminal in terminals: + callback = getattr(transformer, terminal.name, None) + if callback is not None: + result[terminal.name] = callback + return result + +class PostLexConnector: + def __init__(self, lexer, postlexer): + self.lexer = lexer + self.postlexer = postlexer + + def lex(self, lexer_state, parser_state): + i = self.lexer.lex(lexer_state, parser_state) + return self.postlexer.process(i) + + + +def create_basic_lexer(lexer_conf, parser, postlex, options): + cls = (options and options._plugins.get('BasicLexer')) or BasicLexer + return cls(lexer_conf) + +def create_contextual_lexer(lexer_conf, parser, postlex, options): + cls = (options and options._plugins.get('ContextualLexer')) or ContextualLexer + states = {idx:list(t.keys()) for idx, t in parser._parse_table.states.items()} + always_accept = postlex.always_accept if postlex else () + return cls(lexer_conf, states, always_accept=always_accept) + +def create_lalr_parser(lexer_conf, parser_conf, options=None): + debug = options.debug if options else False + cls = (options and options._plugins.get('LALR_Parser')) or LALR_Parser + return cls(parser_conf, debug=debug) + +_parser_creators['lalr'] = create_lalr_parser + +###} + +class EarleyRegexpMatcher: + def __init__(self, lexer_conf): + self.regexps = {} + for t in lexer_conf.terminals: + regexp = t.pattern.to_regexp() + try: + width = get_regexp_width(regexp)[0] + except ValueError: + raise GrammarError("Bad regexp in token %s: %s" % (t.name, regexp)) + else: + if width == 0: + raise GrammarError("Dynamic Earley doesn't allow zero-width regexps", t) + if lexer_conf.use_bytes: + regexp = regexp.encode('utf-8') + + self.regexps[t.name] = lexer_conf.re_module.compile(regexp, lexer_conf.g_regex_flags) + + def match(self, term, text, index=0): + return self.regexps[term.name].match(text, index) + + +def create_earley_parser__dynamic(lexer_conf, parser_conf, options=None, **kw): + if lexer_conf.callbacks: + raise GrammarError("Earley's dynamic lexer doesn't support lexer_callbacks.") + + earley_matcher = EarleyRegexpMatcher(lexer_conf) + return xearley.Parser(lexer_conf, parser_conf, earley_matcher.match, **kw) + +def _match_earley_basic(term, token): + return term.name == 
token.type + +def create_earley_parser__basic(lexer_conf, parser_conf, options, **kw): + return earley.Parser(lexer_conf, parser_conf, _match_earley_basic, **kw) + +def create_earley_parser(lexer_conf, parser_conf, options): + resolve_ambiguity = options.ambiguity == 'resolve' + debug = options.debug if options else False + tree_class = options.tree_class or Tree if options.ambiguity != 'forest' else None + + extra = {} + if lexer_conf.lexer_type == 'dynamic': + f = create_earley_parser__dynamic + elif lexer_conf.lexer_type == 'dynamic_complete': + extra['complete_lex'] =True + f = create_earley_parser__dynamic + else: + f = create_earley_parser__basic + + return f(lexer_conf, parser_conf, options, resolve_ambiguity=resolve_ambiguity, debug=debug, tree_class=tree_class, **extra) + + + +class CYK_FrontEnd: + def __init__(self, lexer_conf, parser_conf, options=None): + self._analysis = GrammarAnalyzer(parser_conf) + self.parser = cyk.Parser(parser_conf.rules) + + self.callbacks = parser_conf.callbacks + + def parse(self, lexer_thread, start): + tokens = list(lexer_thread.lex(None)) + tree = self.parser.parse(tokens, start) + return self._transform(tree) + + def _transform(self, tree): + subtrees = list(tree.iter_subtrees()) + for subtree in subtrees: + subtree.children = [self._apply_callback(c) if isinstance(c, Tree) else c for c in subtree.children] + + return self._apply_callback(tree) + + def _apply_callback(self, tree): + return self.callbacks[tree.rule](tree.children) + + +_parser_creators['earley'] = create_earley_parser +_parser_creators['cyk'] = CYK_FrontEnd + + +def _construct_parsing_frontend( + parser_type: _ParserArgType, + lexer_type: _LexerArgType, + lexer_conf, + parser_conf, + options +): + assert isinstance(lexer_conf, LexerConf) + assert isinstance(parser_conf, ParserConf) + parser_conf.parser_type = parser_type + lexer_conf.lexer_type = lexer_type + return ParsingFrontend(lexer_conf, parser_conf, options) diff --git a/src/poetry/core/_vendor/lark/parsers/__init__.py b/src/poetry/core/_vendor/lark/parsers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/_vendor/lark/parsers/cyk.py b/src/poetry/core/_vendor/lark/parsers/cyk.py new file mode 100644 index 0000000..82818cc --- /dev/null +++ b/src/poetry/core/_vendor/lark/parsers/cyk.py @@ -0,0 +1,345 @@ +"""This module implements a CYK parser.""" + +# Author: https://github.com/ehudt (2018) +# +# Adapted by Erez + + +from collections import defaultdict +import itertools + +from ..exceptions import ParseError +from ..lexer import Token +from ..tree import Tree +from ..grammar import Terminal as T, NonTerminal as NT, Symbol + +try: + xrange +except NameError: + xrange = range + +def match(t, s): + assert isinstance(t, T) + return t.name == s.type + + +class Rule: + """Context-free grammar rule.""" + + def __init__(self, lhs, rhs, weight, alias): + super(Rule, self).__init__() + assert isinstance(lhs, NT), lhs + assert all(isinstance(x, NT) or isinstance(x, T) for x in rhs), rhs + self.lhs = lhs + self.rhs = rhs + self.weight = weight + self.alias = alias + + def __str__(self): + return '%s -> %s' % (str(self.lhs), ' '.join(str(x) for x in self.rhs)) + + def __repr__(self): + return str(self) + + def __hash__(self): + return hash((self.lhs, tuple(self.rhs))) + + def __eq__(self, other): + return self.lhs == other.lhs and self.rhs == other.rhs + + def __ne__(self, other): + return not (self == other) + + +class Grammar: + """Context-free grammar.""" + + def __init__(self, rules): + self.rules = 
frozenset(rules) + + def __eq__(self, other): + return self.rules == other.rules + + def __str__(self): + return '\n' + '\n'.join(sorted(repr(x) for x in self.rules)) + '\n' + + def __repr__(self): + return str(self) + + +# Parse tree data structures +class RuleNode: + """A node in the parse tree, which also contains the full rhs rule.""" + + def __init__(self, rule, children, weight=0): + self.rule = rule + self.children = children + self.weight = weight + + def __repr__(self): + return 'RuleNode(%s, [%s])' % (repr(self.rule.lhs), ', '.join(str(x) for x in self.children)) + + + +class Parser: + """Parser wrapper.""" + + def __init__(self, rules): + super(Parser, self).__init__() + self.orig_rules = {rule: rule for rule in rules} + rules = [self._to_rule(rule) for rule in rules] + self.grammar = to_cnf(Grammar(rules)) + + def _to_rule(self, lark_rule): + """Converts a lark rule, (lhs, rhs, callback, options), to a Rule.""" + assert isinstance(lark_rule.origin, NT) + assert all(isinstance(x, Symbol) for x in lark_rule.expansion) + return Rule( + lark_rule.origin, lark_rule.expansion, + weight=lark_rule.options.priority if lark_rule.options.priority else 0, + alias=lark_rule) + + def parse(self, tokenized, start): # pylint: disable=invalid-name + """Parses input, which is a list of tokens.""" + assert start + start = NT(start) + + table, trees = _parse(tokenized, self.grammar) + # Check if the parse succeeded. + if all(r.lhs != start for r in table[(0, len(tokenized) - 1)]): + raise ParseError('Parsing failed.') + parse = trees[(0, len(tokenized) - 1)][start] + return self._to_tree(revert_cnf(parse)) + + def _to_tree(self, rule_node): + """Converts a RuleNode parse tree to a lark Tree.""" + orig_rule = self.orig_rules[rule_node.rule.alias] + children = [] + for child in rule_node.children: + if isinstance(child, RuleNode): + children.append(self._to_tree(child)) + else: + assert isinstance(child.name, Token) + children.append(child.name) + t = Tree(orig_rule.origin, children) + t.rule=orig_rule + return t + + +def print_parse(node, indent=0): + if isinstance(node, RuleNode): + print(' ' * (indent * 2) + str(node.rule.lhs)) + for child in node.children: + print_parse(child, indent + 1) + else: + print(' ' * (indent * 2) + str(node.s)) + + +def _parse(s, g): + """Parses sentence 's' using CNF grammar 'g'.""" + # The CYK table. Indexed with a 2-tuple: (start pos, end pos) + table = defaultdict(set) + # Top-level structure is similar to the CYK table. Each cell is a dict from + # rule name to the best (lightest) tree for that rule. 
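+    # Illustrative example (hypothetical rule and token names, not part of the
+    # original code): for a two-token input and a CNF rule "a -> b c", the loops
+    # below might end up with
+    #     table[(0, 0)] = {b -> TOK}      trees[(0, 0)] = {b: RuleNode(b -> TOK, ...)}
+    #     table[(1, 1)] = {c -> TOK}      trees[(1, 1)] = {c: RuleNode(c -> TOK, ...)}
+    #     table[(0, 1)] = {a -> b c}      trees[(0, 1)] = {a: RuleNode(a -> b c, ...)}
+    # where the (0, 1) span covers the whole input, so "a" is a complete parse.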
+    trees = defaultdict(dict)
+    # Populate base case with existing terminal production rules
+    for i, w in enumerate(s):
+        for terminal, rules in g.terminal_rules.items():
+            if match(terminal, w):
+                for rule in rules:
+                    table[(i, i)].add(rule)
+                    if (rule.lhs not in trees[(i, i)] or
+                        rule.weight < trees[(i, i)][rule.lhs].weight):
+                        trees[(i, i)][rule.lhs] = RuleNode(rule, [T(w)], weight=rule.weight)
+
+    # Iterate over lengths of sub-sentences
+    for l in xrange(2, len(s) + 1):
+        # Iterate over sub-sentences with the given length
+        for i in xrange(len(s) - l + 1):
+            # Choose partition of the sub-sentence in [1, l)
+            for p in xrange(i + 1, i + l):
+                span1 = (i, p - 1)
+                span2 = (p, i + l - 1)
+                for r1, r2 in itertools.product(table[span1], table[span2]):
+                    for rule in g.nonterminal_rules.get((r1.lhs, r2.lhs), []):
+                        table[(i, i + l - 1)].add(rule)
+                        r1_tree = trees[span1][r1.lhs]
+                        r2_tree = trees[span2][r2.lhs]
+                        rule_total_weight = rule.weight + r1_tree.weight + r2_tree.weight
+                        if (rule.lhs not in trees[(i, i + l - 1)]
+                            or rule_total_weight < trees[(i, i + l - 1)][rule.lhs].weight):
+                            trees[(i, i + l - 1)][rule.lhs] = RuleNode(rule, [r1_tree, r2_tree], weight=rule_total_weight)
+    return table, trees
+
+
+# This section implements a converter of context-free grammars to Chomsky normal form.
+# It also implements a conversion of parse trees from CNF back to the original
+# grammar.
+# Overview:
+# Applies the following operations in this order:
+# * TERM: Eliminates non-solitary terminals from all rules
+# * BIN: Eliminates rules with more than 2 symbols on their right-hand side.
+# * UNIT: Eliminates non-terminal unit rules
+#
+# The following grammar characteristics aren't supported:
+# * Start symbol appears on the RHS
+# * Empty rules (epsilon rules)
+
+
+class CnfWrapper:
+    """CNF wrapper for grammar.
+
+    Validates that the input grammar is CNF and provides helper data structures.
+    """
+
+    def __init__(self, grammar):
+        super(CnfWrapper, self).__init__()
+        self.grammar = grammar
+        self.rules = grammar.rules
+        self.terminal_rules = defaultdict(list)
+        self.nonterminal_rules = defaultdict(list)
+        for r in self.rules:
+            # Validate that the grammar is CNF and populate auxiliary data structures.
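+            # Hedged example of the two accepted shapes (hypothetical names, not
+            # from the original code): a terminal rule such as
+            #     NT('__T_A') -> [T('A')]          goes into self.terminal_rules,
+            # and a binary non-terminal rule such as
+            #     NT('a') -> [NT('b'), NT('c')]    goes into self.nonterminal_rules;
+            # anything else fails the checks below.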
+ assert isinstance(r.lhs, NT), r + if len(r.rhs) not in [1, 2]: + raise ParseError("CYK doesn't support empty rules") + if len(r.rhs) == 1 and isinstance(r.rhs[0], T): + self.terminal_rules[r.rhs[0]].append(r) + elif len(r.rhs) == 2 and all(isinstance(x, NT) for x in r.rhs): + self.nonterminal_rules[tuple(r.rhs)].append(r) + else: + assert False, r + + def __eq__(self, other): + return self.grammar == other.grammar + + def __repr__(self): + return repr(self.grammar) + + +class UnitSkipRule(Rule): + """A rule that records NTs that were skipped during transformation.""" + + def __init__(self, lhs, rhs, skipped_rules, weight, alias): + super(UnitSkipRule, self).__init__(lhs, rhs, weight, alias) + self.skipped_rules = skipped_rules + + def __eq__(self, other): + return isinstance(other, type(self)) and self.skipped_rules == other.skipped_rules + + __hash__ = Rule.__hash__ + + +def build_unit_skiprule(unit_rule, target_rule): + skipped_rules = [] + if isinstance(unit_rule, UnitSkipRule): + skipped_rules += unit_rule.skipped_rules + skipped_rules.append(target_rule) + if isinstance(target_rule, UnitSkipRule): + skipped_rules += target_rule.skipped_rules + return UnitSkipRule(unit_rule.lhs, target_rule.rhs, skipped_rules, + weight=unit_rule.weight + target_rule.weight, alias=unit_rule.alias) + + +def get_any_nt_unit_rule(g): + """Returns a non-terminal unit rule from 'g', or None if there is none.""" + for rule in g.rules: + if len(rule.rhs) == 1 and isinstance(rule.rhs[0], NT): + return rule + return None + + +def _remove_unit_rule(g, rule): + """Removes 'rule' from 'g' without changing the langugage produced by 'g'.""" + new_rules = [x for x in g.rules if x != rule] + refs = [x for x in g.rules if x.lhs == rule.rhs[0]] + new_rules += [build_unit_skiprule(rule, ref) for ref in refs] + return Grammar(new_rules) + + +def _split(rule): + """Splits a rule whose len(rhs) > 2 into shorter rules.""" + rule_str = str(rule.lhs) + '__' + '_'.join(str(x) for x in rule.rhs) + rule_name = '__SP_%s' % (rule_str) + '_%d' + yield Rule(rule.lhs, [rule.rhs[0], NT(rule_name % 1)], weight=rule.weight, alias=rule.alias) + for i in xrange(1, len(rule.rhs) - 2): + yield Rule(NT(rule_name % i), [rule.rhs[i], NT(rule_name % (i + 1))], weight=0, alias='Split') + yield Rule(NT(rule_name % (len(rule.rhs) - 2)), rule.rhs[-2:], weight=0, alias='Split') + + +def _term(g): + """Applies the TERM rule on 'g' (see top comment).""" + all_t = {x for rule in g.rules for x in rule.rhs if isinstance(x, T)} + t_rules = {t: Rule(NT('__T_%s' % str(t)), [t], weight=0, alias='Term') for t in all_t} + new_rules = [] + for rule in g.rules: + if len(rule.rhs) > 1 and any(isinstance(x, T) for x in rule.rhs): + new_rhs = [t_rules[x].lhs if isinstance(x, T) else x for x in rule.rhs] + new_rules.append(Rule(rule.lhs, new_rhs, weight=rule.weight, alias=rule.alias)) + new_rules.extend(v for k, v in t_rules.items() if k in rule.rhs) + else: + new_rules.append(rule) + return Grammar(new_rules) + + +def _bin(g): + """Applies the BIN rule to 'g' (see top comment).""" + new_rules = [] + for rule in g.rules: + if len(rule.rhs) > 2: + new_rules += _split(rule) + else: + new_rules.append(rule) + return Grammar(new_rules) + + +def _unit(g): + """Applies the UNIT rule to 'g' (see top comment).""" + nt_unit_rule = get_any_nt_unit_rule(g) + while nt_unit_rule: + g = _remove_unit_rule(g, nt_unit_rule) + nt_unit_rule = get_any_nt_unit_rule(g) + return g + + +def to_cnf(g): + """Creates a CNF grammar from a general context-free grammar 'g'.""" + g = 
_unit(_bin(_term(g))) + return CnfWrapper(g) + + +def unroll_unit_skiprule(lhs, orig_rhs, skipped_rules, children, weight, alias): + if not skipped_rules: + return RuleNode(Rule(lhs, orig_rhs, weight=weight, alias=alias), children, weight=weight) + else: + weight = weight - skipped_rules[0].weight + return RuleNode( + Rule(lhs, [skipped_rules[0].lhs], weight=weight, alias=alias), [ + unroll_unit_skiprule(skipped_rules[0].lhs, orig_rhs, + skipped_rules[1:], children, + skipped_rules[0].weight, skipped_rules[0].alias) + ], weight=weight) + + +def revert_cnf(node): + """Reverts a parse tree (RuleNode) to its original non-CNF form (Node).""" + if isinstance(node, T): + return node + # Reverts TERM rule. + if node.rule.lhs.name.startswith('__T_'): + return node.children[0] + else: + children = [] + for child in map(revert_cnf, node.children): + # Reverts BIN rule. + if isinstance(child, RuleNode) and child.rule.lhs.name.startswith('__SP_'): + children += child.children + else: + children.append(child) + # Reverts UNIT rule. + if isinstance(node.rule, UnitSkipRule): + return unroll_unit_skiprule(node.rule.lhs, node.rule.rhs, + node.rule.skipped_rules, children, + node.rule.weight, node.rule.alias) + else: + return RuleNode(node.rule, children) diff --git a/src/poetry/core/_vendor/lark/parsers/earley.py b/src/poetry/core/_vendor/lark/parsers/earley.py new file mode 100644 index 0000000..2a047b0 --- /dev/null +++ b/src/poetry/core/_vendor/lark/parsers/earley.py @@ -0,0 +1,295 @@ +"""This module implements an Earley parser. + +The core Earley algorithm used here is based on Elizabeth Scott's implementation, here: + https://www.sciencedirect.com/science/article/pii/S1571066108001497 + +That is probably the best reference for understanding the algorithm here. + +The Earley parser outputs an SPPF-tree as per that document. The SPPF tree format +is explained here: https://lark-parser.readthedocs.io/en/latest/_static/sppf/sppf.html +""" + +from collections import deque + +from ..lexer import Token +from ..tree import Tree +from ..exceptions import UnexpectedEOF, UnexpectedToken +from ..utils import logger +from .grammar_analysis import GrammarAnalyzer +from ..grammar import NonTerminal +from .earley_common import Item +from .earley_forest import ForestSumVisitor, SymbolNode, TokenNode, ForestToParseTree + +class Parser: + def __init__(self, lexer_conf, parser_conf, term_matcher, resolve_ambiguity=True, debug=False, tree_class=Tree): + analysis = GrammarAnalyzer(parser_conf) + self.lexer_conf = lexer_conf + self.parser_conf = parser_conf + self.resolve_ambiguity = resolve_ambiguity + self.debug = debug + self.tree_class = tree_class + + self.FIRST = analysis.FIRST + self.NULLABLE = analysis.NULLABLE + self.callbacks = parser_conf.callbacks + self.predictions = {} + + ## These could be moved to the grammar analyzer. Pre-computing these is *much* faster than + # the slow 'isupper' in is_terminal. + self.TERMINALS = { sym for r in parser_conf.rules for sym in r.expansion if sym.is_term } + self.NON_TERMINALS = { sym for r in parser_conf.rules for sym in r.expansion if not sym.is_term } + + self.forest_sum_visitor = None + for rule in parser_conf.rules: + if rule.origin not in self.predictions: + self.predictions[rule.origin] = [x.rule for x in analysis.expand_rule(rule.origin)] + + ## Detect if any rules/terminals have priorities set. If the user specified priority = None, then + # the priorities will be stripped from all rules/terminals before they reach us, allowing us to + # skip the extra tree walk. 
We'll also skip this if the user just didn't specify priorities
+            # on any rules/terminals.
+            if self.forest_sum_visitor is None and rule.options.priority is not None:
+                self.forest_sum_visitor = ForestSumVisitor
+
+        # Check terminals for priorities
+        # Ignore terminal priorities if the basic lexer is used
+        if self.lexer_conf.lexer_type != 'basic' and self.forest_sum_visitor is None:
+            for term in self.lexer_conf.terminals:
+                if term.priority:
+                    self.forest_sum_visitor = ForestSumVisitor
+                    break
+
+        self.term_matcher = term_matcher
+
+
+    def predict_and_complete(self, i, to_scan, columns, transitives):
+        """The core Earley Predictor and Completer.
+
+        At each stage of the input, we handle any completed items (things
+        that matched on the last cycle) and use those to predict what should
+        come next in the input stream. The completions and any predicted
+        non-terminals are recursively processed until we reach a set of items
+        that expect a terminal, which can be added to the scan list for the
+        next scanner cycle."""
+        # Held Completions (H in E. Scott's paper).
+        node_cache = {}
+        held_completions = {}
+
+        column = columns[i]
+        # R (items) = Ei (column.items)
+        items = deque(column)
+        while items:
+            item = items.pop()    # remove an element, A say, from R
+
+            ### The Earley completer
+            if item.is_complete:   ### (item.s == string)
+                if item.node is None:
+                    label = (item.s, item.start, i)
+                    item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
+                    item.node.add_family(item.s, item.rule, item.start, None, None)
+
+                # create_leo_transitives(item.rule.origin, item.start)
+
+                ###R Joop Leo right recursion Completer
+                if item.rule.origin in transitives[item.start]:
+                    transitive = transitives[item.start][item.s]
+                    if transitive.previous in transitives[transitive.column]:
+                        root_transitive = transitives[transitive.column][transitive.previous]
+                    else:
+                        root_transitive = transitive
+
+                    new_item = Item(transitive.rule, transitive.ptr, transitive.start)
+                    label = (root_transitive.s, root_transitive.start, i)
+                    new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
+                    new_item.node.add_path(root_transitive, item.node)
+                    if new_item.expect in self.TERMINALS:
+                        # Add (B :: aC.B, h, y) to Q
+                        to_scan.add(new_item)
+                    elif new_item not in column:
+                        # Add (B :: aC.B, h, y) to Ei and R
+                        column.add(new_item)
+                        items.append(new_item)
+                ###R Regular Earley completer
+                else:
+                    # Empty has 0 length. If we complete an empty symbol in a particular
+                    # parse step, we need to be able to use that same empty symbol to complete
+                    # any predictions that result, that themselves require empty. Avoids
+                    # infinite recursion on empty symbols.
+                    # held_completions is 'H' in E.Scott's paper.
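+                    # Rough illustration (added here, not an original comment): if a
+                    # rule "a -> b c" is predicted at position i and "b" matches zero
+                    # tokens, completing the empty "b" records its node here so that
+                    # items predicted later in this same step which expect "b" can be
+                    # advanced immediately instead of being re-predicted forever.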
+ is_empty_item = item.start == i + if is_empty_item: + held_completions[item.rule.origin] = item.node + + originators = [originator for originator in columns[item.start] if originator.expect is not None and originator.expect == item.s] + for originator in originators: + new_item = originator.advance() + label = (new_item.s, originator.start, i) + new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label)) + new_item.node.add_family(new_item.s, new_item.rule, i, originator.node, item.node) + if new_item.expect in self.TERMINALS: + # Add (B :: aC.B, h, y) to Q + to_scan.add(new_item) + elif new_item not in column: + # Add (B :: aC.B, h, y) to Ei and R + column.add(new_item) + items.append(new_item) + + ### The Earley predictor + elif item.expect in self.NON_TERMINALS: ### (item.s == lr0) + new_items = [] + for rule in self.predictions[item.expect]: + new_item = Item(rule, 0, i) + new_items.append(new_item) + + # Process any held completions (H). + if item.expect in held_completions: + new_item = item.advance() + label = (new_item.s, item.start, i) + new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label)) + new_item.node.add_family(new_item.s, new_item.rule, new_item.start, item.node, held_completions[item.expect]) + new_items.append(new_item) + + for new_item in new_items: + if new_item.expect in self.TERMINALS: + to_scan.add(new_item) + elif new_item not in column: + column.add(new_item) + items.append(new_item) + + def _parse(self, lexer, columns, to_scan, start_symbol=None): + def is_quasi_complete(item): + if item.is_complete: + return True + + quasi = item.advance() + while not quasi.is_complete: + if quasi.expect not in self.NULLABLE: + return False + if quasi.rule.origin == start_symbol and quasi.expect == start_symbol: + return False + quasi = quasi.advance() + return True + + # def create_leo_transitives(origin, start): + # ... # removed at commit 4c1cfb2faf24e8f8bff7112627a00b94d261b420 + + def scan(i, token, to_scan): + """The core Earley Scanner. + + This is a custom implementation of the scanner that uses the + Lark lexer to match tokens. The scan list is built by the + Earley predictor, based on the previously completed tokens. 
+ This ensures that at each phase of the parse we have a custom + lexer context, allowing for more complex ambiguities.""" + next_to_scan = set() + next_set = set() + columns.append(next_set) + transitives.append({}) + node_cache = {} + + for item in set(to_scan): + if match(item.expect, token): + new_item = item.advance() + label = (new_item.s, new_item.start, i) + # 'terminals' may not contain token.type when using %declare + # Additionally, token is not always a Token + # For example, it can be a Tree when using TreeMatcher + term = terminals.get(token.type) if isinstance(token, Token) else None + # Set the priority of the token node to 0 so that the + # terminal priorities do not affect the Tree chosen by + # ForestSumVisitor after the basic lexer has already + # "used up" the terminal priorities + token_node = TokenNode(token, term, priority=0) + new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label)) + new_item.node.add_family(new_item.s, item.rule, new_item.start, item.node, token_node) + + if new_item.expect in self.TERMINALS: + # add (B ::= Aai+1.B, h, y) to Q' + next_to_scan.add(new_item) + else: + # add (B ::= Aa+1.B, h, y) to Ei+1 + next_set.add(new_item) + + if not next_set and not next_to_scan: + expect = {i.expect.name for i in to_scan} + raise UnexpectedToken(token, expect, considered_rules=set(to_scan), state=frozenset(i.s for i in to_scan)) + + return next_to_scan + + + # Define parser functions + match = self.term_matcher + + terminals = self.lexer_conf.terminals_by_name + + # Cache for nodes & tokens created in a particular parse step. + transitives = [{}] + + ## The main Earley loop. + # Run the Prediction/Completion cycle for any Items in the current Earley set. + # Completions will be added to the SPPF tree, and predictions will be recursively + # processed down to terminals/empty nodes to be added to the scanner for the next + # step. + expects = {i.expect for i in to_scan} + i = 0 + for token in lexer.lex(expects): + self.predict_and_complete(i, to_scan, columns, transitives) + + to_scan = scan(i, token, to_scan) + i += 1 + + expects.clear() + expects |= {i.expect for i in to_scan} + + self.predict_and_complete(i, to_scan, columns, transitives) + + ## Column is now the final column in the parse. + assert i == len(columns)-1 + return to_scan + + def parse(self, lexer, start): + assert start, start + start_symbol = NonTerminal(start) + + columns = [set()] + to_scan = set() # The scan buffer. 'Q' in E.Scott's paper. + + ## Predict for the start_symbol. + # Add predicted items to the first Earley set (for the predictor) if they + # result in a non-terminal, or the scanner if they result in a terminal. + for rule in self.predictions[start_symbol]: + item = Item(rule, 0, 0) + if item.expect in self.TERMINALS: + to_scan.add(item) + else: + columns[0].add(item) + + to_scan = self._parse(lexer, columns, to_scan, start_symbol) + + # If the parse was successful, the start + # symbol should have been completed in the last step of the Earley cycle, and will be in + # this column. Find the item for the start_symbol, which is the root of the SPPF tree. 
+ solutions = [n.node for n in columns[-1] if n.is_complete and n.node is not None and n.s == start_symbol and n.start == 0] + if not solutions: + expected_terminals = [t.expect.name for t in to_scan] + raise UnexpectedEOF(expected_terminals, state=frozenset(i.s for i in to_scan)) + + if self.debug: + from .earley_forest import ForestToPyDotVisitor + try: + debug_walker = ForestToPyDotVisitor() + except ImportError: + logger.warning("Cannot find dependency 'pydot', will not generate sppf debug image") + else: + debug_walker.visit(solutions[0], "sppf.png") + + + if len(solutions) > 1: + assert False, 'Earley should not generate multiple start symbol items!' + + if self.tree_class is not None: + # Perform our SPPF -> AST conversion + transformer = ForestToParseTree(self.tree_class, self.callbacks, self.forest_sum_visitor and self.forest_sum_visitor(), self.resolve_ambiguity) + return transformer.transform(solutions[0]) + + # return the root of the SPPF + return solutions[0] diff --git a/src/poetry/core/_vendor/lark/parsers/earley_common.py b/src/poetry/core/_vendor/lark/parsers/earley_common.py new file mode 100644 index 0000000..46e242b --- /dev/null +++ b/src/poetry/core/_vendor/lark/parsers/earley_common.py @@ -0,0 +1,42 @@ +"""This module implements useful building blocks for the Earley parser +""" + + +class Item: + "An Earley Item, the atom of the algorithm." + + __slots__ = ('s', 'rule', 'ptr', 'start', 'is_complete', 'expect', 'previous', 'node', '_hash') + def __init__(self, rule, ptr, start): + self.is_complete = len(rule.expansion) == ptr + self.rule = rule # rule + self.ptr = ptr # ptr + self.start = start # j + self.node = None # w + if self.is_complete: + self.s = rule.origin + self.expect = None + self.previous = rule.expansion[ptr - 1] if ptr > 0 and len(rule.expansion) else None + else: + self.s = (rule, ptr) + self.expect = rule.expansion[ptr] + self.previous = rule.expansion[ptr - 1] if ptr > 0 and len(rule.expansion) else None + self._hash = hash((self.s, self.start)) + + def advance(self): + return Item(self.rule, self.ptr + 1, self.start) + + def __eq__(self, other): + return self is other or (self.s == other.s and self.start == other.start) + + def __hash__(self): + return self._hash + + def __repr__(self): + before = ( expansion.name for expansion in self.rule.expansion[:self.ptr] ) + after = ( expansion.name for expansion in self.rule.expansion[self.ptr:] ) + symbol = "{} ::= {}* {}".format(self.rule.origin.name, ' '.join(before), ' '.join(after)) + return '%s (%d)' % (symbol, self.start) + + +# class TransitiveItem(Item): +# ... # removed at commit 4c1cfb2faf24e8f8bff7112627a00b94d261b420 diff --git a/src/poetry/core/_vendor/lark/parsers/earley_forest.py b/src/poetry/core/_vendor/lark/parsers/earley_forest.py new file mode 100644 index 0000000..5892c78 --- /dev/null +++ b/src/poetry/core/_vendor/lark/parsers/earley_forest.py @@ -0,0 +1,804 @@ +""""This module implements an SPPF implementation + +This is used as the primary output mechanism for the Earley parser +in order to store complex ambiguities. 
+ +Full reference and more details is here: +https://web.archive.org/web/20190616123959/http://www.bramvandersanden.com/post/2014/06/shared-packed-parse-forest/ +""" + +from random import randint +from collections import deque +from operator import attrgetter +from importlib import import_module +from functools import partial + +from ..parse_tree_builder import AmbiguousIntermediateExpander +from ..visitors import Discard +from ..lexer import Token +from ..utils import logger +from ..tree import Tree + +class ForestNode: + pass + +class SymbolNode(ForestNode): + """ + A Symbol Node represents a symbol (or Intermediate LR0). + + Symbol nodes are keyed by the symbol (s). For intermediate nodes + s will be an LR0, stored as a tuple of (rule, ptr). For completed symbol + nodes, s will be a string representing the non-terminal origin (i.e. + the left hand side of the rule). + + The children of a Symbol or Intermediate Node will always be Packed Nodes; + with each Packed Node child representing a single derivation of a production. + + Hence a Symbol Node with a single child is unambiguous. + + Parameters: + s: A Symbol, or a tuple of (rule, ptr) for an intermediate node. + start: The index of the start of the substring matched by this symbol (inclusive). + end: The index of the end of the substring matched by this symbol (exclusive). + + Properties: + is_intermediate: True if this node is an intermediate node. + priority: The priority of the node's symbol. + """ + __slots__ = ('s', 'start', 'end', '_children', 'paths', 'paths_loaded', 'priority', 'is_intermediate', '_hash') + def __init__(self, s, start, end): + self.s = s + self.start = start + self.end = end + self._children = set() + self.paths = set() + self.paths_loaded = False + + ### We use inf here as it can be safely negated without resorting to conditionals, + # unlike None or float('NaN'), and sorts appropriately. 
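+        # For example, max(float('-inf'), 0) == 0 and -float('-inf') == float('inf'),
+        # so a node that never receives a priority can never outrank one that does
+        # (an added note, not in the original comment).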
+ self.priority = float('-inf') + self.is_intermediate = isinstance(s, tuple) + self._hash = hash((self.s, self.start, self.end)) + + def add_family(self, lr0, rule, start, left, right): + self._children.add(PackedNode(self, lr0, rule, start, left, right)) + + def add_path(self, transitive, node): + self.paths.add((transitive, node)) + + def load_paths(self): + for transitive, node in self.paths: + if transitive.next_titem is not None: + vn = SymbolNode(transitive.next_titem.s, transitive.next_titem.start, self.end) + vn.add_path(transitive.next_titem, node) + self.add_family(transitive.reduction.rule.origin, transitive.reduction.rule, transitive.reduction.start, transitive.reduction.node, vn) + else: + self.add_family(transitive.reduction.rule.origin, transitive.reduction.rule, transitive.reduction.start, transitive.reduction.node, node) + self.paths_loaded = True + + @property + def is_ambiguous(self): + """Returns True if this node is ambiguous.""" + return len(self.children) > 1 + + @property + def children(self): + """Returns a list of this node's children sorted from greatest to + least priority.""" + if not self.paths_loaded: self.load_paths() + return sorted(self._children, key=attrgetter('sort_key')) + + def __iter__(self): + return iter(self._children) + + def __eq__(self, other): + if not isinstance(other, SymbolNode): + return False + return self is other or (type(self.s) == type(other.s) and self.s == other.s and self.start == other.start and self.end is other.end) + + def __hash__(self): + return self._hash + + def __repr__(self): + if self.is_intermediate: + rule = self.s[0] + ptr = self.s[1] + before = ( expansion.name for expansion in rule.expansion[:ptr] ) + after = ( expansion.name for expansion in rule.expansion[ptr:] ) + symbol = "{} ::= {}* {}".format(rule.origin.name, ' '.join(before), ' '.join(after)) + else: + symbol = self.s.name + return "({}, {}, {}, {})".format(symbol, self.start, self.end, self.priority) + +class PackedNode(ForestNode): + """ + A Packed Node represents a single derivation in a symbol node. + + Parameters: + rule: The rule associated with this node. + parent: The parent of this node. + left: The left child of this node. ``None`` if one does not exist. + right: The right child of this node. ``None`` if one does not exist. + priority: The priority of this node. + """ + __slots__ = ('parent', 's', 'rule', 'start', 'left', 'right', 'priority', '_hash') + def __init__(self, parent, s, rule, start, left, right): + self.parent = parent + self.s = s + self.start = start + self.rule = rule + self.left = left + self.right = right + self.priority = float('-inf') + self._hash = hash((self.left, self.right)) + + @property + def is_empty(self): + return self.left is None and self.right is None + + @property + def sort_key(self): + """ + Used to sort PackedNode children of SymbolNodes. + A SymbolNode has multiple PackedNodes if it matched + ambiguously. Hence, we use the sort order to identify + the order in which ambiguous children should be considered. 
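+
+        For example (an illustrative reading of the key, not from the original
+        docstring): ``(False, -2, 0)`` sorts before ``(False, -1, 3)``, so
+        non-empty derivations with higher priority come first, with ties broken
+        by the rule's definition order.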
+ """ + return self.is_empty, -self.priority, self.rule.order + + @property + def children(self): + """Returns a list of this node's children.""" + return [x for x in [self.left, self.right] if x is not None] + + def __iter__(self): + yield self.left + yield self.right + + def __eq__(self, other): + if not isinstance(other, PackedNode): + return False + return self is other or (self.left == other.left and self.right == other.right) + + def __hash__(self): + return self._hash + + def __repr__(self): + if isinstance(self.s, tuple): + rule = self.s[0] + ptr = self.s[1] + before = ( expansion.name for expansion in rule.expansion[:ptr] ) + after = ( expansion.name for expansion in rule.expansion[ptr:] ) + symbol = "{} ::= {}* {}".format(rule.origin.name, ' '.join(before), ' '.join(after)) + else: + symbol = self.s.name + return "({}, {}, {}, {})".format(symbol, self.start, self.priority, self.rule.order) + +class TokenNode(ForestNode): + """ + A Token Node represents a matched terminal and is always a leaf node. + + Parameters: + token: The Token associated with this node. + term: The TerminalDef matched by the token. + priority: The priority of this node. + """ + __slots__ = ('token', 'term', 'priority', '_hash') + def __init__(self, token, term, priority=None): + self.token = token + self.term = term + if priority is not None: + self.priority = priority + else: + self.priority = term.priority if term is not None else 0 + self._hash = hash(token) + + def __eq__(self, other): + if not isinstance(other, TokenNode): + return False + return self is other or (self.token == other.token) + + def __hash__(self): + return self._hash + + def __repr__(self): + return repr(self.token) + +class ForestVisitor: + """ + An abstract base class for building forest visitors. + + This class performs a controllable depth-first walk of an SPPF. + The visitor will not enter cycles and will backtrack if one is encountered. + Subclasses are notified of cycles through the ``on_cycle`` method. + + Behavior for visit events is defined by overriding the + ``visit*node*`` functions. + + The walk is controlled by the return values of the ``visit*node_in`` + methods. Returning a node(s) will schedule them to be visited. The visitor + will begin to backtrack if no nodes are returned. + + Parameters: + single_visit: If ``True``, non-Token nodes will only be visited once. + """ + + def __init__(self, single_visit=False): + self.single_visit = single_visit + + def visit_token_node(self, node): + """Called when a ``Token`` is visited. ``Token`` nodes are always leaves.""" + pass + + def visit_symbol_node_in(self, node): + """Called when a symbol node is visited. Nodes that are returned + will be scheduled to be visited. If ``visit_intermediate_node_in`` + is not implemented, this function will be called for intermediate + nodes as well.""" + pass + + def visit_symbol_node_out(self, node): + """Called after all nodes returned from a corresponding ``visit_symbol_node_in`` + call have been visited. If ``visit_intermediate_node_out`` + is not implemented, this function will be called for intermediate + nodes as well.""" + pass + + def visit_packed_node_in(self, node): + """Called when a packed node is visited. Nodes that are returned + will be scheduled to be visited. """ + pass + + def visit_packed_node_out(self, node): + """Called after all nodes returned from a corresponding ``visit_packed_node_in`` + call have been visited.""" + pass + + def on_cycle(self, node, path): + """Called when a cycle is encountered. 
+ + Parameters: + node: The node that causes a cycle. + path: The list of nodes being visited: nodes that have been + entered but not exited. The first element is the root in a forest + visit, and the last element is the node visited most recently. + ``path`` should be treated as read-only. + """ + pass + + def get_cycle_in_path(self, node, path): + """A utility function for use in ``on_cycle`` to obtain a slice of + ``path`` that only contains the nodes that make up the cycle.""" + index = len(path) - 1 + while id(path[index]) != id(node): + index -= 1 + return path[index:] + + def visit(self, root): + # Visiting is a list of IDs of all symbol/intermediate nodes currently in + # the stack. It serves two purposes: to detect when we 'recurse' in and out + # of a symbol/intermediate so that we can process both up and down. Also, + # since the SPPF can have cycles it allows us to detect if we're trying + # to recurse into a node that's already on the stack (infinite recursion). + visiting = set() + + # set of all nodes that have been visited + visited = set() + + # a list of nodes that are currently being visited + # used for the `on_cycle` callback + path = [] + + # We do not use recursion here to walk the Forest due to the limited + # stack size in python. Therefore input_stack is essentially our stack. + input_stack = deque([root]) + + # It is much faster to cache these as locals since they are called + # many times in large parses. + vpno = getattr(self, 'visit_packed_node_out') + vpni = getattr(self, 'visit_packed_node_in') + vsno = getattr(self, 'visit_symbol_node_out') + vsni = getattr(self, 'visit_symbol_node_in') + vino = getattr(self, 'visit_intermediate_node_out', vsno) + vini = getattr(self, 'visit_intermediate_node_in', vsni) + vtn = getattr(self, 'visit_token_node') + oc = getattr(self, 'on_cycle') + + while input_stack: + current = next(reversed(input_stack)) + try: + next_node = next(current) + except StopIteration: + input_stack.pop() + continue + except TypeError: + ### If the current object is not an iterator, pass through to Token/SymbolNode + pass + else: + if next_node is None: + continue + + if id(next_node) in visiting: + oc(next_node, path) + continue + + input_stack.append(next_node) + continue + + if isinstance(current, TokenNode): + vtn(current.token) + input_stack.pop() + continue + + current_id = id(current) + if current_id in visiting: + if isinstance(current, PackedNode): + vpno(current) + elif current.is_intermediate: + vino(current) + else: + vsno(current) + input_stack.pop() + path.pop() + visiting.remove(current_id) + visited.add(current_id) + elif self.single_visit and current_id in visited: + input_stack.pop() + else: + visiting.add(current_id) + path.append(current) + if isinstance(current, PackedNode): + next_node = vpni(current) + elif current.is_intermediate: + next_node = vini(current) + else: + next_node = vsni(current) + if next_node is None: + continue + + if not isinstance(next_node, ForestNode): + next_node = iter(next_node) + elif id(next_node) in visiting: + oc(next_node, path) + continue + + input_stack.append(next_node) + +class ForestTransformer(ForestVisitor): + """The base class for a bottom-up forest transformation. Most users will + want to use ``TreeForestTransformer`` instead as it has a friendlier + interface and covers most use cases. + + Transformations are applied via inheritance and overriding of the + ``transform*node`` methods. + + ``transform_token_node`` receives a ``Token`` as an argument. 
+ All other methods receive the node that is being transformed and + a list of the results of the transformations of that node's children. + The return value of these methods are the resulting transformations. + + If ``Discard`` is raised in a node's transformation, no data from that node + will be passed to its parent's transformation. + """ + + def __init__(self): + super(ForestTransformer, self).__init__() + # results of transformations + self.data = dict() + # used to track parent nodes + self.node_stack = deque() + + def transform(self, root): + """Perform a transformation on an SPPF.""" + self.node_stack.append('result') + self.data['result'] = [] + self.visit(root) + assert len(self.data['result']) <= 1 + if self.data['result']: + return self.data['result'][0] + + def transform_symbol_node(self, node, data): + """Transform a symbol node.""" + return node + + def transform_intermediate_node(self, node, data): + """Transform an intermediate node.""" + return node + + def transform_packed_node(self, node, data): + """Transform a packed node.""" + return node + + def transform_token_node(self, node): + """Transform a ``Token``.""" + return node + + def visit_symbol_node_in(self, node): + self.node_stack.append(id(node)) + self.data[id(node)] = [] + return node.children + + def visit_packed_node_in(self, node): + self.node_stack.append(id(node)) + self.data[id(node)] = [] + return node.children + + def visit_token_node(self, node): + transformed = self.transform_token_node(node) + if transformed is not Discard: + self.data[self.node_stack[-1]].append(transformed) + + def _visit_node_out_helper(self, node, method): + self.node_stack.pop() + transformed = method(node, self.data[id(node)]) + if transformed is not Discard: + self.data[self.node_stack[-1]].append(transformed) + del self.data[id(node)] + + def visit_symbol_node_out(self, node): + self._visit_node_out_helper(node, self.transform_symbol_node) + + def visit_intermediate_node_out(self, node): + self._visit_node_out_helper(node, self.transform_intermediate_node) + + def visit_packed_node_out(self, node): + self._visit_node_out_helper(node, self.transform_packed_node) + + +class ForestSumVisitor(ForestVisitor): + """ + A visitor for prioritizing ambiguous parts of the Forest. + + This visitor is used when support for explicit priorities on + rules is requested (whether normal, or invert). It walks the + forest (or subsets thereof) and cascades properties upwards + from the leaves. + + It would be ideal to do this during parsing, however this would + require processing each Earley item multiple times. That's + a big performance drawback; so running a forest walk is the + lesser of two evils: there can be significantly more Earley + items created during parsing than there are SPPF nodes in the + final tree. 
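+
+    In short (a summary added for clarity, not in the original text): each packed
+    node receives its rule's priority plus the priorities of its two children,
+    and each symbol node takes the maximum priority of its packed children, so
+    priorities bubble up from the leaves to the root.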
+ """ + def __init__(self): + super(ForestSumVisitor, self).__init__(single_visit=True) + + def visit_packed_node_in(self, node): + yield node.left + yield node.right + + def visit_symbol_node_in(self, node): + return iter(node.children) + + def visit_packed_node_out(self, node): + priority = node.rule.options.priority if not node.parent.is_intermediate and node.rule.options.priority else 0 + priority += getattr(node.right, 'priority', 0) + priority += getattr(node.left, 'priority', 0) + node.priority = priority + + def visit_symbol_node_out(self, node): + node.priority = max(child.priority for child in node.children) + +class PackedData(): + """Used in transformationss of packed nodes to distinguish the data + that comes from the left child and the right child. + """ + + class _NoData(): + pass + + NO_DATA = _NoData() + + def __init__(self, node, data): + self.left = self.NO_DATA + self.right = self.NO_DATA + if data: + if node.left is not None: + self.left = data[0] + if len(data) > 1: + self.right = data[1] + else: + self.right = data[0] + +class ForestToParseTree(ForestTransformer): + """Used by the earley parser when ambiguity equals 'resolve' or + 'explicit'. Transforms an SPPF into an (ambiguous) parse tree. + + Parameters: + tree_class: The tree class to use for construction + callbacks: A dictionary of rules to functions that output a tree + prioritizer: A ``ForestVisitor`` that manipulates the priorities of ForestNodes + resolve_ambiguity: If True, ambiguities will be resolved based on + priorities. Otherwise, `_ambig` nodes will be in the resulting tree. + use_cache: If True, the results of packed node transformations will be cached. + """ + + def __init__(self, tree_class=Tree, callbacks=dict(), prioritizer=ForestSumVisitor(), resolve_ambiguity=True, use_cache=True): + super(ForestToParseTree, self).__init__() + self.tree_class = tree_class + self.callbacks = callbacks + self.prioritizer = prioritizer + self.resolve_ambiguity = resolve_ambiguity + self._use_cache = use_cache + self._cache = {} + self._on_cycle_retreat = False + self._cycle_node = None + self._successful_visits = set() + + def visit(self, root): + if self.prioritizer: + self.prioritizer.visit(root) + super(ForestToParseTree, self).visit(root) + self._cache = {} + + def on_cycle(self, node, path): + logger.debug("Cycle encountered in the SPPF at node: %s. " + "As infinite ambiguities cannot be represented in a tree, " + "this family of derivations will be discarded.", node) + self._cycle_node = node + self._on_cycle_retreat = True + + def _check_cycle(self, node): + if self._on_cycle_retreat: + if id(node) == id(self._cycle_node) or id(node) in self._successful_visits: + self._cycle_node = None + self._on_cycle_retreat = False + else: + return Discard + + def _collapse_ambig(self, children): + new_children = [] + for child in children: + if hasattr(child, 'data') and child.data == '_ambig': + new_children += child.children + else: + new_children.append(child) + return new_children + + def _call_rule_func(self, node, data): + # called when transforming children of symbol nodes + # data is a list of trees or tokens that correspond to the + # symbol's rule expansion + return self.callbacks[node.rule](data) + + def _call_ambig_func(self, node, data): + # called when transforming a symbol node + # data is a list of trees where each tree's data is + # equal to the name of the symbol or one of its aliases. 
+ if len(data) > 1: + return self.tree_class('_ambig', data) + elif data: + return data[0] + return Discard + + def transform_symbol_node(self, node, data): + if id(node) not in self._successful_visits: + return Discard + r = self._check_cycle(node) + if r is Discard: + return r + self._successful_visits.remove(id(node)) + data = self._collapse_ambig(data) + return self._call_ambig_func(node, data) + + def transform_intermediate_node(self, node, data): + if id(node) not in self._successful_visits: + return Discard + r = self._check_cycle(node) + if r is Discard: + return r + self._successful_visits.remove(id(node)) + if len(data) > 1: + children = [self.tree_class('_inter', c) for c in data] + return self.tree_class('_iambig', children) + return data[0] + + def transform_packed_node(self, node, data): + r = self._check_cycle(node) + if r is Discard: + return r + if self.resolve_ambiguity and id(node.parent) in self._successful_visits: + return Discard + if self._use_cache and id(node) in self._cache: + return self._cache[id(node)] + children = [] + assert len(data) <= 2 + data = PackedData(node, data) + if data.left is not PackedData.NO_DATA: + if node.left.is_intermediate and isinstance(data.left, list): + children += data.left + else: + children.append(data.left) + if data.right is not PackedData.NO_DATA: + children.append(data.right) + if node.parent.is_intermediate: + return self._cache.setdefault(id(node), children) + return self._cache.setdefault(id(node), self._call_rule_func(node, children)) + + def visit_symbol_node_in(self, node): + super(ForestToParseTree, self).visit_symbol_node_in(node) + if self._on_cycle_retreat: + return + return node.children + + def visit_packed_node_in(self, node): + self._on_cycle_retreat = False + to_visit = super(ForestToParseTree, self).visit_packed_node_in(node) + if not self.resolve_ambiguity or id(node.parent) not in self._successful_visits: + if not self._use_cache or id(node) not in self._cache: + return to_visit + + def visit_packed_node_out(self, node): + super(ForestToParseTree, self).visit_packed_node_out(node) + if not self._on_cycle_retreat: + self._successful_visits.add(id(node.parent)) + +def handles_ambiguity(func): + """Decorator for methods of subclasses of ``TreeForestTransformer``. + Denotes that the method should receive a list of transformed derivations.""" + func.handles_ambiguity = True + return func + +class TreeForestTransformer(ForestToParseTree): + """A ``ForestTransformer`` with a tree ``Transformer``-like interface. + By default, it will construct a tree. + + Methods provided via inheritance are called based on the rule/symbol + names of nodes in the forest. + + Methods that act on rules will receive a list of the results of the + transformations of the rule's children. By default, trees and tokens. + + Methods that act on tokens will receive a token. + + Alternatively, methods that act on rules may be annotated with + ``handles_ambiguity``. In this case, the function will receive a list + of all the transformations of all the derivations of the rule. + By default, a list of trees where each tree.data is equal to the + rule name or one of its aliases. + + Non-tree transformations are made possible by override of + ``__default__``, ``__default_token__``, and ``__default_ambig__``. + + Note: + Tree shaping features such as inlined rules and token filtering are + not built into the transformation. Positions are also not propagated. 
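+
+    Example (an illustrative sketch, not from the original docstring; it assumes
+    a grammar with a rule named ``start`` and a terminal named ``NAME``)::
+
+        class MyTransformer(TreeForestTransformer):
+            def NAME(self, token):
+                # Called once per matched NAME terminal.
+                return token.upper()
+
+            @handles_ambiguity
+            def start(self, derivations):
+                # Receives every transformed derivation of 'start'; keep the first.
+                return derivations[0]
+
+        # tree = MyTransformer().transform(forest_root)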
+ + Parameters: + tree_class: The tree class to use for construction + prioritizer: A ``ForestVisitor`` that manipulates the priorities of nodes in the SPPF. + resolve_ambiguity: If True, ambiguities will be resolved based on priorities. + use_cache (bool): If True, caches the results of some transformations, + potentially improving performance when ``resolve_ambiguity==False``. + Only use if you know what you are doing: i.e. All transformation + functions are pure and referentially transparent. + """ + + def __init__(self, tree_class=Tree, prioritizer=ForestSumVisitor(), resolve_ambiguity=True, use_cache=False): + super(TreeForestTransformer, self).__init__(tree_class, dict(), prioritizer, resolve_ambiguity, use_cache) + + def __default__(self, name, data): + """Default operation on tree (for override). + + Returns a tree with name with data as children. + """ + return self.tree_class(name, data) + + def __default_ambig__(self, name, data): + """Default operation on ambiguous rule (for override). + + Wraps data in an '_ambig_' node if it contains more than + one element. + """ + if len(data) > 1: + return self.tree_class('_ambig', data) + elif data: + return data[0] + return Discard + + def __default_token__(self, node): + """Default operation on ``Token`` (for override). + + Returns ``node``. + """ + return node + + def transform_token_node(self, node): + return getattr(self, node.type, self.__default_token__)(node) + + def _call_rule_func(self, node, data): + name = node.rule.alias or node.rule.options.template_source or node.rule.origin.name + user_func = getattr(self, name, self.__default__) + if user_func == self.__default__ or hasattr(user_func, 'handles_ambiguity'): + user_func = partial(self.__default__, name) + if not self.resolve_ambiguity: + wrapper = partial(AmbiguousIntermediateExpander, self.tree_class) + user_func = wrapper(user_func) + return user_func(data) + + def _call_ambig_func(self, node, data): + name = node.s.name + user_func = getattr(self, name, self.__default_ambig__) + if user_func == self.__default_ambig__ or not hasattr(user_func, 'handles_ambiguity'): + user_func = partial(self.__default_ambig__, name) + return user_func(data) + +class ForestToPyDotVisitor(ForestVisitor): + """ + A Forest visitor which writes the SPPF to a PNG. + + The SPPF can get really large, really quickly because + of the amount of meta-data it stores, so this is probably + only useful for trivial trees and learning how the SPPF + is structured. 
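+
+    Typical usage (illustrative; requires the optional ``pydot`` package)::
+
+        ForestToPyDotVisitor().visit(sppf_root, "sppf.png")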
+ """ + def __init__(self, rankdir="TB"): + super(ForestToPyDotVisitor, self).__init__(single_visit=True) + self.pydot = import_module('pydot') + self.graph = self.pydot.Dot(graph_type='digraph', rankdir=rankdir) + + def visit(self, root, filename): + super(ForestToPyDotVisitor, self).visit(root) + try: + self.graph.write_png(filename) + except FileNotFoundError as e: + logger.error("Could not write png: ", e) + + def visit_token_node(self, node): + graph_node_id = str(id(node)) + graph_node_label = "\"{}\"".format(node.value.replace('"', '\\"')) + graph_node_color = 0x808080 + graph_node_style = "\"filled,rounded\"" + graph_node_shape = "diamond" + graph_node = self.pydot.Node(graph_node_id, style=graph_node_style, fillcolor="#{:06x}".format(graph_node_color), shape=graph_node_shape, label=graph_node_label) + self.graph.add_node(graph_node) + + def visit_packed_node_in(self, node): + graph_node_id = str(id(node)) + graph_node_label = repr(node) + graph_node_color = 0x808080 + graph_node_style = "filled" + graph_node_shape = "diamond" + graph_node = self.pydot.Node(graph_node_id, style=graph_node_style, fillcolor="#{:06x}".format(graph_node_color), shape=graph_node_shape, label=graph_node_label) + self.graph.add_node(graph_node) + yield node.left + yield node.right + + def visit_packed_node_out(self, node): + graph_node_id = str(id(node)) + graph_node = self.graph.get_node(graph_node_id)[0] + for child in [node.left, node.right]: + if child is not None: + child_graph_node_id = str(id(child.token if isinstance(child, TokenNode) else child)) + child_graph_node = self.graph.get_node(child_graph_node_id)[0] + self.graph.add_edge(self.pydot.Edge(graph_node, child_graph_node)) + else: + #### Try and be above the Python object ID range; probably impl. specific, but maybe this is okay. 
+ child_graph_node_id = str(randint(100000000000000000000000000000,123456789012345678901234567890)) + child_graph_node_style = "invis" + child_graph_node = self.pydot.Node(child_graph_node_id, style=child_graph_node_style, label="None") + child_edge_style = "invis" + self.graph.add_node(child_graph_node) + self.graph.add_edge(self.pydot.Edge(graph_node, child_graph_node, style=child_edge_style)) + + def visit_symbol_node_in(self, node): + graph_node_id = str(id(node)) + graph_node_label = repr(node) + graph_node_color = 0x808080 + graph_node_style = "\"filled\"" + if node.is_intermediate: + graph_node_shape = "ellipse" + else: + graph_node_shape = "rectangle" + graph_node = self.pydot.Node(graph_node_id, style=graph_node_style, fillcolor="#{:06x}".format(graph_node_color), shape=graph_node_shape, label=graph_node_label) + self.graph.add_node(graph_node) + return iter(node.children) + + def visit_symbol_node_out(self, node): + graph_node_id = str(id(node)) + graph_node = self.graph.get_node(graph_node_id)[0] + for child in node.children: + child_graph_node_id = str(id(child)) + child_graph_node = self.graph.get_node(child_graph_node_id)[0] + self.graph.add_edge(self.pydot.Edge(graph_node, child_graph_node)) diff --git a/src/poetry/core/_vendor/lark/parsers/grammar_analysis.py b/src/poetry/core/_vendor/lark/parsers/grammar_analysis.py new file mode 100644 index 0000000..b526e47 --- /dev/null +++ b/src/poetry/core/_vendor/lark/parsers/grammar_analysis.py @@ -0,0 +1,185 @@ +from collections import Counter, defaultdict + +from ..utils import bfs, fzset, classify +from ..exceptions import GrammarError +from ..grammar import Rule, Terminal, NonTerminal + + +class RulePtr: + __slots__ = ('rule', 'index') + + def __init__(self, rule, index): + assert isinstance(rule, Rule) + assert index <= len(rule.expansion) + self.rule = rule + self.index = index + + def __repr__(self): + before = [x.name for x in self.rule.expansion[:self.index]] + after = [x.name for x in self.rule.expansion[self.index:]] + return '<%s : %s * %s>' % (self.rule.origin.name, ' '.join(before), ' '.join(after)) + + @property + def next(self): + return self.rule.expansion[self.index] + + def advance(self, sym): + assert self.next == sym + return RulePtr(self.rule, self.index+1) + + @property + def is_satisfied(self): + return self.index == len(self.rule.expansion) + + def __eq__(self, other): + return self.rule == other.rule and self.index == other.index + def __hash__(self): + return hash((self.rule, self.index)) + + +# state generation ensures no duplicate LR0ItemSets +class LR0ItemSet: + __slots__ = ('kernel', 'closure', 'transitions', 'lookaheads') + + def __init__(self, kernel, closure): + self.kernel = fzset(kernel) + self.closure = fzset(closure) + self.transitions = {} + self.lookaheads = defaultdict(set) + + def __repr__(self): + return '{%s | %s}' % (', '.join([repr(r) for r in self.kernel]), ', '.join([repr(r) for r in self.closure])) + + +def update_set(set1, set2): + if not set2 or set1 > set2: + return False + + copy = set(set1) + set1 |= set2 + return set1 != copy + +def calculate_sets(rules): + """Calculate FOLLOW sets. + + Adapted from: http://lara.epfl.ch/w/cc09:algorithm_for_first_and_follow_sets""" + symbols = {sym for rule in rules for sym in rule.expansion} | {rule.origin for rule in rules} + + # foreach grammar rule X ::= Y(1) ... 
Y(k) + # if k=0 or {Y(1),...,Y(k)} subset of NULLABLE then + # NULLABLE = NULLABLE union {X} + # for i = 1 to k + # if i=1 or {Y(1),...,Y(i-1)} subset of NULLABLE then + # FIRST(X) = FIRST(X) union FIRST(Y(i)) + # for j = i+1 to k + # if i=k or {Y(i+1),...Y(k)} subset of NULLABLE then + # FOLLOW(Y(i)) = FOLLOW(Y(i)) union FOLLOW(X) + # if i+1=j or {Y(i+1),...,Y(j-1)} subset of NULLABLE then + # FOLLOW(Y(i)) = FOLLOW(Y(i)) union FIRST(Y(j)) + # until none of NULLABLE,FIRST,FOLLOW changed in last iteration + + NULLABLE = set() + FIRST = {} + FOLLOW = {} + for sym in symbols: + FIRST[sym]={sym} if sym.is_term else set() + FOLLOW[sym]=set() + + # Calculate NULLABLE and FIRST + changed = True + while changed: + changed = False + + for rule in rules: + if set(rule.expansion) <= NULLABLE: + if update_set(NULLABLE, {rule.origin}): + changed = True + + for i, sym in enumerate(rule.expansion): + if set(rule.expansion[:i]) <= NULLABLE: + if update_set(FIRST[rule.origin], FIRST[sym]): + changed = True + else: + break + + # Calculate FOLLOW + changed = True + while changed: + changed = False + + for rule in rules: + for i, sym in enumerate(rule.expansion): + if i==len(rule.expansion)-1 or set(rule.expansion[i+1:]) <= NULLABLE: + if update_set(FOLLOW[sym], FOLLOW[rule.origin]): + changed = True + + for j in range(i+1, len(rule.expansion)): + if set(rule.expansion[i+1:j]) <= NULLABLE: + if update_set(FOLLOW[sym], FIRST[rule.expansion[j]]): + changed = True + + return FIRST, FOLLOW, NULLABLE + + +class GrammarAnalyzer: + def __init__(self, parser_conf, debug=False): + self.debug = debug + + root_rules = {start: Rule(NonTerminal('$root_' + start), [NonTerminal(start), Terminal('$END')]) + for start in parser_conf.start} + + rules = parser_conf.rules + list(root_rules.values()) + self.rules_by_origin = classify(rules, lambda r: r.origin) + + if len(rules) != len(set(rules)): + duplicates = [item for item, count in Counter(rules).items() if count > 1] + raise GrammarError("Rules defined twice: %s" % ', '.join(str(i) for i in duplicates)) + + for r in rules: + for sym in r.expansion: + if not (sym.is_term or sym in self.rules_by_origin): + raise GrammarError("Using an undefined rule: %s" % sym) + + self.start_states = {start: self.expand_rule(root_rule.origin) + for start, root_rule in root_rules.items()} + + self.end_states = {start: fzset({RulePtr(root_rule, len(root_rule.expansion))}) + for start, root_rule in root_rules.items()} + + lr0_root_rules = {start: Rule(NonTerminal('$root_' + start), [NonTerminal(start)]) + for start in parser_conf.start} + + lr0_rules = parser_conf.rules + list(lr0_root_rules.values()) + assert(len(lr0_rules) == len(set(lr0_rules))) + + self.lr0_rules_by_origin = classify(lr0_rules, lambda r: r.origin) + + # cache RulePtr(r, 0) in r (no duplicate RulePtr objects) + self.lr0_start_states = {start: LR0ItemSet([RulePtr(root_rule, 0)], self.expand_rule(root_rule.origin, self.lr0_rules_by_origin)) + for start, root_rule in lr0_root_rules.items()} + + self.FIRST, self.FOLLOW, self.NULLABLE = calculate_sets(rules) + + def expand_rule(self, source_rule, rules_by_origin=None): + "Returns all init_ptrs accessible by rule (recursive)" + + if rules_by_origin is None: + rules_by_origin = self.rules_by_origin + + init_ptrs = set() + def _expand_rule(rule): + assert not rule.is_term, rule + + for r in rules_by_origin[rule]: + init_ptr = RulePtr(r, 0) + init_ptrs.add(init_ptr) + + if r.expansion: # if not empty rule + new_r = init_ptr.next + if not new_r.is_term: + yield new_r + + for _ in 
bfs([source_rule], _expand_rule): + pass + + return fzset(init_ptrs) diff --git a/src/poetry/core/_vendor/lark/parsers/lalr_analysis.py b/src/poetry/core/_vendor/lark/parsers/lalr_analysis.py new file mode 100644 index 0000000..216371e --- /dev/null +++ b/src/poetry/core/_vendor/lark/parsers/lalr_analysis.py @@ -0,0 +1,303 @@ +"""This module builds a LALR(1) transition-table for lalr_parser.py + +For now, shift/reduce conflicts are automatically resolved as shifts. +""" + +# Author: Erez Shinan (2017) +# Email : erezshin@gmail.com + +from collections import defaultdict + +from ..utils import classify, classify_bool, bfs, fzset, Enumerator, logger +from ..exceptions import GrammarError + +from .grammar_analysis import GrammarAnalyzer, Terminal, LR0ItemSet +from ..grammar import Rule + +###{standalone + +class Action: + def __init__(self, name): + self.name = name + def __str__(self): + return self.name + def __repr__(self): + return str(self) + +Shift = Action('Shift') +Reduce = Action('Reduce') + + +class ParseTable: + def __init__(self, states, start_states, end_states): + self.states = states + self.start_states = start_states + self.end_states = end_states + + def serialize(self, memo): + tokens = Enumerator() + + states = { + state: {tokens.get(token): ((1, arg.serialize(memo)) if action is Reduce else (0, arg)) + for token, (action, arg) in actions.items()} + for state, actions in self.states.items() + } + + return { + 'tokens': tokens.reversed(), + 'states': states, + 'start_states': self.start_states, + 'end_states': self.end_states, + } + + @classmethod + def deserialize(cls, data, memo): + tokens = data['tokens'] + states = { + state: {tokens[token]: ((Reduce, Rule.deserialize(arg, memo)) if action==1 else (Shift, arg)) + for token, (action, arg) in actions.items()} + for state, actions in data['states'].items() + } + return cls(states, data['start_states'], data['end_states']) + + +class IntParseTable(ParseTable): + + @classmethod + def from_ParseTable(cls, parse_table): + enum = list(parse_table.states) + state_to_idx = {s:i for i,s in enumerate(enum)} + int_states = {} + + for s, la in parse_table.states.items(): + la = {k:(v[0], state_to_idx[v[1]]) if v[0] is Shift else v + for k,v in la.items()} + int_states[ state_to_idx[s] ] = la + + + start_states = {start:state_to_idx[s] for start, s in parse_table.start_states.items()} + end_states = {start:state_to_idx[s] for start, s in parse_table.end_states.items()} + return cls(int_states, start_states, end_states) + +###} + + +# digraph and traverse, see The Theory and Practice of Compiler Writing + +# computes F(x) = G(x) union (union { G(y) | x R y }) +# X: nodes +# R: relation (function mapping node -> list of nodes that satisfy the relation) +# G: set valued function +def digraph(X, R, G): + F = {} + S = [] + N = {} + for x in X: + N[x] = 0 + for x in X: + # this is always true for the first iteration, but N[x] may be updated in traverse below + if N[x] == 0: + traverse(x, S, N, X, R, G, F) + return F + +# x: single node +# S: stack +# N: weights +# X: nodes +# R: relation (see above) +# G: set valued function +# F: set valued function we are computing (map of input -> output) +def traverse(x, S, N, X, R, G, F): + S.append(x) + d = len(S) + N[x] = d + F[x] = G[x] + for y in R[x]: + if N[y] == 0: + traverse(y, S, N, X, R, G, F) + n_x = N[x] + assert(n_x > 0) + n_y = N[y] + assert(n_y != 0) + if (n_y > 0) and (n_y < n_x): + N[x] = n_y + F[x].update(F[y]) + if N[x] == d: + f_x = F[x] + while True: + z = S.pop() + N[z] = -1 + F[z] 
= f_x + if z == x: + break + + +class LALR_Analyzer(GrammarAnalyzer): + def __init__(self, parser_conf, debug=False): + GrammarAnalyzer.__init__(self, parser_conf, debug) + self.nonterminal_transitions = [] + self.directly_reads = defaultdict(set) + self.reads = defaultdict(set) + self.includes = defaultdict(set) + self.lookback = defaultdict(set) + + + def compute_lr0_states(self): + self.lr0_states = set() + # map of kernels to LR0ItemSets + cache = {} + + def step(state): + _, unsat = classify_bool(state.closure, lambda rp: rp.is_satisfied) + + d = classify(unsat, lambda rp: rp.next) + for sym, rps in d.items(): + kernel = fzset({rp.advance(sym) for rp in rps}) + new_state = cache.get(kernel, None) + if new_state is None: + closure = set(kernel) + for rp in kernel: + if not rp.is_satisfied and not rp.next.is_term: + closure |= self.expand_rule(rp.next, self.lr0_rules_by_origin) + new_state = LR0ItemSet(kernel, closure) + cache[kernel] = new_state + + state.transitions[sym] = new_state + yield new_state + + self.lr0_states.add(state) + + for _ in bfs(self.lr0_start_states.values(), step): + pass + + def compute_reads_relations(self): + # handle start state + for root in self.lr0_start_states.values(): + assert(len(root.kernel) == 1) + for rp in root.kernel: + assert(rp.index == 0) + self.directly_reads[(root, rp.next)] = set([ Terminal('$END') ]) + + for state in self.lr0_states: + seen = set() + for rp in state.closure: + if rp.is_satisfied: + continue + s = rp.next + # if s is a not a nonterminal + if s not in self.lr0_rules_by_origin: + continue + if s in seen: + continue + seen.add(s) + nt = (state, s) + self.nonterminal_transitions.append(nt) + dr = self.directly_reads[nt] + r = self.reads[nt] + next_state = state.transitions[s] + for rp2 in next_state.closure: + if rp2.is_satisfied: + continue + s2 = rp2.next + # if s2 is a terminal + if s2 not in self.lr0_rules_by_origin: + dr.add(s2) + if s2 in self.NULLABLE: + r.add((next_state, s2)) + + def compute_includes_lookback(self): + for nt in self.nonterminal_transitions: + state, nonterminal = nt + includes = [] + lookback = self.lookback[nt] + for rp in state.closure: + if rp.rule.origin != nonterminal: + continue + # traverse the states for rp(.rule) + state2 = state + for i in range(rp.index, len(rp.rule.expansion)): + s = rp.rule.expansion[i] + nt2 = (state2, s) + state2 = state2.transitions[s] + if nt2 not in self.reads: + continue + for j in range(i + 1, len(rp.rule.expansion)): + if not rp.rule.expansion[j] in self.NULLABLE: + break + else: + includes.append(nt2) + # state2 is at the final state for rp.rule + if rp.index == 0: + for rp2 in state2.closure: + if (rp2.rule == rp.rule) and rp2.is_satisfied: + lookback.add((state2, rp2.rule)) + for nt2 in includes: + self.includes[nt2].add(nt) + + def compute_lookaheads(self): + read_sets = digraph(self.nonterminal_transitions, self.reads, self.directly_reads) + follow_sets = digraph(self.nonterminal_transitions, self.includes, read_sets) + + for nt, lookbacks in self.lookback.items(): + for state, rule in lookbacks: + for s in follow_sets[nt]: + state.lookaheads[s].add(rule) + + def compute_lalr1_states(self): + m = {} + reduce_reduce = [] + for state in self.lr0_states: + actions = {} + for la, next_state in state.transitions.items(): + actions[la] = (Shift, next_state.closure) + for la, rules in state.lookaheads.items(): + if len(rules) > 1: + # Try to resolve conflict based on priority + p = [(r.options.priority or 0, r) for r in rules] + p.sort(key=lambda r: r[0], reverse=True) 
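+                    # p is now sorted by priority, highest first: keep only the top rule when it
+                    # strictly out-prioritizes the runner-up, otherwise record the reduce/reduce
+                    # conflict so it can be reported as a GrammarError below.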
+ best, second_best = p[:2] + if best[0] > second_best[0]: + rules = [best[1]] + else: + reduce_reduce.append((state, la, rules)) + if la in actions: + if self.debug: + logger.warning('Shift/Reduce conflict for terminal %s: (resolving as shift)', la.name) + logger.warning(' * %s', list(rules)[0]) + else: + actions[la] = (Reduce, list(rules)[0]) + m[state] = { k.name: v for k, v in actions.items() } + + if reduce_reduce: + msgs = [] + for state, la, rules in reduce_reduce: + msg = 'Reduce/Reduce collision in %s between the following rules: %s' % (la, ''.join([ '\n\t- ' + str(r) for r in rules ])) + if self.debug: + msg += '\n collision occurred in state: {%s\n }' % ''.join(['\n\t' + str(x) for x in state.closure]) + msgs.append(msg) + raise GrammarError('\n\n'.join(msgs)) + + states = { k.closure: v for k, v in m.items() } + + # compute end states + end_states = {} + for state in states: + for rp in state: + for start in self.lr0_start_states: + if rp.rule.origin.name == ('$root_' + start) and rp.is_satisfied: + assert(start not in end_states) + end_states[start] = state + + _parse_table = ParseTable(states, { start: state.closure for start, state in self.lr0_start_states.items() }, end_states) + + if self.debug: + self.parse_table = _parse_table + else: + self.parse_table = IntParseTable.from_ParseTable(_parse_table) + + def compute_lalr(self): + self.compute_lr0_states() + self.compute_reads_relations() + self.compute_includes_lookback() + self.compute_lookaheads() + self.compute_lalr1_states() diff --git a/src/poetry/core/_vendor/lark/parsers/lalr_interactive_parser.py b/src/poetry/core/_vendor/lark/parsers/lalr_interactive_parser.py new file mode 100644 index 0000000..0013ddf --- /dev/null +++ b/src/poetry/core/_vendor/lark/parsers/lalr_interactive_parser.py @@ -0,0 +1,148 @@ +# This module provides a LALR interactive parser, which is used for debugging and error handling + +from typing import Iterator, List +from copy import copy +import warnings + +from lark.exceptions import UnexpectedToken +from lark.lexer import Token, LexerThread + + +class InteractiveParser: + """InteractiveParser gives you advanced control over parsing and error handling when parsing with LALR. + + For a simpler interface, see the ``on_error`` argument to ``Lark.parse()``. + """ + def __init__(self, parser, parser_state, lexer_thread: LexerThread): + self.parser = parser + self.parser_state = parser_state + self.lexer_thread = lexer_thread + self.result = None + + @property + def lexer_state(self) -> LexerThread: + warnings.warn("lexer_state will be removed in subsequent releases. Use lexer_thread instead.", DeprecationWarning) + return self.lexer_thread + + def feed_token(self, token: Token): + """Feed the parser with a token, and advance it to the next state, as if it received it from the lexer. + + Note that ``token`` has to be an instance of ``Token``. + """ + return self.parser_state.feed_token(token, token.type == '$END') + + def iter_parse(self) -> Iterator[Token]: + """Step through the different stages of the parse, by reading tokens from the lexer + and feeding them to the parser, one per iteration. + + Returns an iterator of the tokens it encounters. + + When the parse is over, the resulting tree can be found in ``InteractiveParser.result``. + """ + for token in self.lexer_thread.lex(self.parser_state): + yield token + self.result = self.feed_token(token) + + def exhaust_lexer(self) -> List[Token]: + """Try to feed the rest of the lexer state into the interactive parser. 
+ + Note that this modifies the instance in place and does not feed an '$END' Token + """ + return list(self.iter_parse()) + + + def feed_eof(self, last_token=None): + """Feed a '$END' Token. Borrows from 'last_token' if given.""" + eof = Token.new_borrow_pos('$END', '', last_token) if last_token is not None else self.lexer_thread._Token('$END', '', 0, 1, 1) + return self.feed_token(eof) + + + def __copy__(self): + """Create a new interactive parser with a separate state. + + Calls to feed_token() won't affect the old instance, and vice-versa. + """ + return type(self)( + self.parser, + copy(self.parser_state), + copy(self.lexer_thread), + ) + + def copy(self): + return copy(self) + + def __eq__(self, other): + if not isinstance(other, InteractiveParser): + return False + + return self.parser_state == other.parser_state and self.lexer_thread == other.lexer_thread + + def as_immutable(self): + """Convert to an ``ImmutableInteractiveParser``.""" + p = copy(self) + return ImmutableInteractiveParser(p.parser, p.parser_state, p.lexer_thread) + + def pretty(self): + """Print the output of ``choices()`` in a way that's easier to read.""" + out = ["Parser choices:"] + for k, v in self.choices().items(): + out.append('\t- %s -> %r' % (k, v)) + out.append('stack size: %s' % len(self.parser_state.state_stack)) + return '\n'.join(out) + + def choices(self): + """Returns a dictionary of token types, matched to their action in the parser. + + Only returns token types that are accepted by the current state. + + Updated by ``feed_token()``. + """ + return self.parser_state.parse_conf.parse_table.states[self.parser_state.position] + + def accepts(self): + """Returns the set of possible tokens that will advance the parser into a new valid state.""" + accepts = set() + for t in self.choices(): + if t.isupper(): # is terminal? + new_cursor = copy(self) + try: + new_cursor.feed_token(self.lexer_thread._Token(t, '')) + except UnexpectedToken: + pass + else: + accepts.add(t) + return accepts + + def resume_parse(self): + """Resume automated parsing from the current state.""" + return self.parser.parse_from_state(self.parser_state) + + + +class ImmutableInteractiveParser(InteractiveParser): + """Same as ``InteractiveParser``, but operations create a new instance instead + of changing it in-place. + """ + + result = None + + def __hash__(self): + return hash((self.parser_state, self.lexer_thread)) + + def feed_token(self, token): + c = copy(self) + c.result = InteractiveParser.feed_token(c, token) + return c + + def exhaust_lexer(self): + """Try to feed the rest of the lexer state into the parser. 
+ + Note that this returns a new ImmutableInteractiveParser and does not feed an '$END' Token""" + cursor = self.as_mutable() + cursor.exhaust_lexer() + return cursor.as_immutable() + + def as_mutable(self): + """Convert to an ``InteractiveParser``.""" + p = copy(self) + return InteractiveParser(p.parser, p.parser_state, p.lexer_thread) diff --git a/src/poetry/core/_vendor/lark/parsers/lalr_parser.py b/src/poetry/core/_vendor/lark/parsers/lalr_parser.py new file mode 100644 index 0000000..c89c49d --- /dev/null +++ b/src/poetry/core/_vendor/lark/parsers/lalr_parser.py @@ -0,0 +1,199 @@ +"""This module implements a LALR(1) Parser +""" +# Author: Erez Shinan (2017) +# Email : erezshin@gmail.com +from copy import deepcopy, copy +from typing import Dict, Any +from ..lexer import Token +from ..utils import Serialize + +from .lalr_analysis import LALR_Analyzer, Shift, Reduce, IntParseTable +from .lalr_interactive_parser import InteractiveParser +from lark.exceptions import UnexpectedCharacters, UnexpectedInput, UnexpectedToken + +###{standalone + +class LALR_Parser(Serialize): + def __init__(self, parser_conf, debug=False): + analysis = LALR_Analyzer(parser_conf, debug=debug) + analysis.compute_lalr() + callbacks = parser_conf.callbacks + + self._parse_table = analysis.parse_table + self.parser_conf = parser_conf + self.parser = _Parser(analysis.parse_table, callbacks, debug) + + @classmethod + def deserialize(cls, data, memo, callbacks, debug=False): + inst = cls.__new__(cls) + inst._parse_table = IntParseTable.deserialize(data, memo) + inst.parser = _Parser(inst._parse_table, callbacks, debug) + return inst + + def serialize(self, memo: Any = None) -> Dict[str, Any]: + return self._parse_table.serialize(memo) + + def parse_interactive(self, lexer, start): + return self.parser.parse(lexer, start, start_interactive=True) + + def parse(self, lexer, start, on_error=None): + try: + return self.parser.parse(lexer, start) + except UnexpectedInput as e: + if on_error is None: + raise + + while True: + if isinstance(e, UnexpectedCharacters): + s = e.interactive_parser.lexer_thread.state + p = s.line_ctr.char_pos + + if not on_error(e): + raise e + + if isinstance(e, UnexpectedCharacters): + # If user didn't change the character position, then we should + if p == s.line_ctr.char_pos: + s.line_ctr.feed(s.text[p:p+1]) + + try: + return e.interactive_parser.resume_parse() + except UnexpectedToken as e2: + if (isinstance(e, UnexpectedToken) + and e.token.type == e2.token.type == '$END' + and e.interactive_parser == e2.interactive_parser): + # Prevent infinite loop + raise e2 + e = e2 + except UnexpectedCharacters as e2: + e = e2 + + +class ParseConf: + __slots__ = 'parse_table', 'callbacks', 'start', 'start_state', 'end_state', 'states' + + def __init__(self, parse_table, callbacks, start): + self.parse_table = parse_table + + self.start_state = self.parse_table.start_states[start] + self.end_state = self.parse_table.end_states[start] + self.states = self.parse_table.states + + self.callbacks = callbacks + self.start = start + + +class ParserState: + __slots__ = 'parse_conf', 'lexer', 'state_stack', 'value_stack' + + def __init__(self, parse_conf, lexer, state_stack=None, value_stack=None): + self.parse_conf = parse_conf + self.lexer = lexer + self.state_stack = state_stack or [self.parse_conf.start_state] + self.value_stack = value_stack or [] + + @property + def position(self): + return self.state_stack[-1] + + # Necessary for match_examples() to work + def __eq__(self, other): + if not isinstance(other, 
ParserState): + return NotImplemented + return len(self.state_stack) == len(other.state_stack) and self.position == other.position + + def __copy__(self): + return type(self)( + self.parse_conf, + self.lexer, # XXX copy + copy(self.state_stack), + deepcopy(self.value_stack), + ) + + def copy(self): + return copy(self) + + def feed_token(self, token, is_end=False): + state_stack = self.state_stack + value_stack = self.value_stack + states = self.parse_conf.states + end_state = self.parse_conf.end_state + callbacks = self.parse_conf.callbacks + + while True: + state = state_stack[-1] + try: + action, arg = states[state][token.type] + except KeyError: + expected = {s for s in states[state].keys() if s.isupper()} + raise UnexpectedToken(token, expected, state=self, interactive_parser=None) + + assert arg != end_state + + if action is Shift: + # shift once and return + assert not is_end + state_stack.append(arg) + value_stack.append(token if token.type not in callbacks else callbacks[token.type](token)) + return + else: + # reduce+shift as many times as necessary + rule = arg + size = len(rule.expansion) + if size: + s = value_stack[-size:] + del state_stack[-size:] + del value_stack[-size:] + else: + s = [] + + value = callbacks[rule](s) + + _action, new_state = states[state_stack[-1]][rule.origin.name] + assert _action is Shift + state_stack.append(new_state) + value_stack.append(value) + + if is_end and state_stack[-1] == end_state: + return value_stack[-1] + +class _Parser: + def __init__(self, parse_table, callbacks, debug=False): + self.parse_table = parse_table + self.callbacks = callbacks + self.debug = debug + + def parse(self, lexer, start, value_stack=None, state_stack=None, start_interactive=False): + parse_conf = ParseConf(self.parse_table, self.callbacks, start) + parser_state = ParserState(parse_conf, lexer, state_stack, value_stack) + if start_interactive: + return InteractiveParser(self, parser_state, parser_state.lexer) + return self.parse_from_state(parser_state) + + + def parse_from_state(self, state): + # Main LALR-parser loop + try: + token = None + for token in state.lexer.lex(state): + state.feed_token(token) + + end_token = Token.new_borrow_pos('$END', '', token) if token else Token('$END', '', 0, 1, 1) + return state.feed_token(end_token, True) + except UnexpectedInput as e: + try: + e.interactive_parser = InteractiveParser(self, state, state.lexer) + except NameError: + pass + raise e + except Exception as e: + if self.debug: + print("") + print("STATE STACK DUMP") + print("----------------") + for i, s in enumerate(state.state_stack): + print('%d)' % i , s) + print("") + + raise +###} diff --git a/src/poetry/core/_vendor/lark/parsers/resolve_ambig.py b/src/poetry/core/_vendor/lark/parsers/resolve_ambig.py new file mode 100644 index 0000000..2470eb9 --- /dev/null +++ b/src/poetry/core/_vendor/lark/parsers/resolve_ambig.py @@ -0,0 +1,109 @@ +from ..utils import compare +from functools import cmp_to_key + +from ..tree import Tree + + +# Standard ambiguity resolver (uses comparison) +# +# Author: Erez Sh + +def _compare_rules(rule1, rule2): + return -compare( len(rule1.expansion), len(rule2.expansion)) + +def _sum_priority(tree): + p = 0 + + for n in tree.iter_subtrees(): + try: + p += n.meta.rule.options.priority or 0 + except AttributeError: + pass + + return p + +def _compare_priority(tree1, tree2): + tree1.iter_subtrees() + +def _compare_drv(tree1, tree2): + try: + rule1 = tree1.meta.rule + except AttributeError: + rule1 = None + + try: + rule2 = tree2.meta.rule + 
except AttributeError: + rule2 = None + + if None == rule1 == rule2: + return compare(tree1, tree2) + elif rule1 is None: + return -1 + elif rule2 is None: + return 1 + + assert tree1.data != '_ambig' + assert tree2.data != '_ambig' + + p1 = _sum_priority(tree1) + p2 = _sum_priority(tree2) + c = (p1 or p2) and compare(p1, p2) + if c: + return c + + c = _compare_rules(tree1.meta.rule, tree2.meta.rule) + if c: + return c + + # rules are "equal", so compare trees + if len(tree1.children) == len(tree2.children): + for t1, t2 in zip(tree1.children, tree2.children): + c = _compare_drv(t1, t2) + if c: + return c + + return compare(len(tree1.children), len(tree2.children)) + + +def _standard_resolve_ambig(tree): + assert tree.data == '_ambig' + key_f = cmp_to_key(_compare_drv) + best = max(tree.children, key=key_f) + assert best.data == 'drv' + tree.set('drv', best.children) + tree.meta.rule = best.meta.rule # needed for applying callbacks + +def standard_resolve_ambig(tree): + for ambig in tree.find_data('_ambig'): + _standard_resolve_ambig(ambig) + + return tree + + + + +# Anti-score Sum +# +# Author: Uriva (https://github.com/uriva) + +def _antiscore_sum_drv(tree): + if not isinstance(tree, Tree): + return 0 + + assert tree.data != '_ambig' + + return _sum_priority(tree) + +def _antiscore_sum_resolve_ambig(tree): + assert tree.data == '_ambig' + best = min(tree.children, key=_antiscore_sum_drv) + assert best.data == 'drv' + tree.set('drv', best.children) + tree.meta.rule = best.meta.rule # needed for applying callbacks + +def antiscore_sum_resolve_ambig(tree): + for ambig in tree.find_data('_ambig'): + _antiscore_sum_resolve_ambig(ambig) + + return tree diff --git a/src/poetry/core/_vendor/lark/parsers/xearley.py b/src/poetry/core/_vendor/lark/parsers/xearley.py new file mode 100644 index 0000000..343e5c0 --- /dev/null +++ b/src/poetry/core/_vendor/lark/parsers/xearley.py @@ -0,0 +1,159 @@ +"""This module implements an experimental Earley parser with a dynamic lexer + +The core Earley algorithm used here is based on Elizabeth Scott's implementation, here: + https://www.sciencedirect.com/science/article/pii/S1571066108001497 + +That is probably the best reference for understanding the algorithm here. + +The Earley parser outputs an SPPF-tree as per that document. The SPPF tree format +is better documented here: + http://www.bramvandersanden.com/post/2014/06/shared-packed-parse-forest/ + +Instead of running a lexer beforehand, or using a costy char-by-char method, this parser +uses regular expressions by necessity, achieving high-performance while maintaining all of +Earley's power in parsing any CFG. +""" + +from collections import defaultdict + +from ..tree import Tree +from ..exceptions import UnexpectedCharacters +from ..lexer import Token +from ..grammar import Terminal +from .earley import Parser as BaseParser +from .earley_forest import SymbolNode, TokenNode + + +class Parser(BaseParser): + def __init__(self, lexer_conf, parser_conf, term_matcher, resolve_ambiguity=True, complete_lex = False, debug=False, tree_class=Tree): + BaseParser.__init__(self, lexer_conf, parser_conf, term_matcher, resolve_ambiguity, debug, tree_class) + self.ignore = [Terminal(t) for t in lexer_conf.ignore] + self.complete_lex = complete_lex + + def _parse(self, stream, columns, to_scan, start_symbol=None): + + def scan(i, to_scan): + """The core Earley Scanner. + + This is a custom implementation of the scanner that uses the + Lark lexer to match tokens. 
The scan list is built by the + Earley predictor, based on the previously completed tokens. + This ensures that at each phase of the parse we have a custom + lexer context, allowing for more complex ambiguities.""" + + node_cache = {} + + # 1) Loop the expectations and ask the lexer to match. + # Since regexp is forward looking on the input stream, and we only + # want to process tokens when we hit the point in the stream at which + # they complete, we push all tokens into a buffer (delayed_matches), to + # be held possibly for a later parse step when we reach the point in the + # input stream at which they complete. + for item in set(to_scan): + m = match(item.expect, stream, i) + if m: + t = Token(item.expect.name, m.group(0), i, text_line, text_column) + delayed_matches[m.end()].append( (item, i, t) ) + + if self.complete_lex: + s = m.group(0) + for j in range(1, len(s)): + m = match(item.expect, s[:-j]) + if m: + t = Token(item.expect.name, m.group(0), i, text_line, text_column) + delayed_matches[i+m.end()].append( (item, i, t) ) + + # XXX The following 3 lines were commented out for causing a bug. See issue #768 + # # Remove any items that successfully matched in this pass from the to_scan buffer. + # # This ensures we don't carry over tokens that already matched, if we're ignoring below. + # to_scan.remove(item) + + # 3) Process any ignores. This is typically used for e.g. whitespace. + # We carry over any unmatched items from the to_scan buffer to be matched again after + # the ignore. This should allow us to use ignored symbols in non-terminals to implement + # e.g. mandatory spacing. + for x in self.ignore: + m = match(x, stream, i) + if m: + # Carry over any items still in the scan buffer, to past the end of the ignored items. + delayed_matches[m.end()].extend([(item, i, None) for item in to_scan ]) + + # If we're ignoring up to the end of the file, # carry over the start symbol if it already completed. + delayed_matches[m.end()].extend([(item, i, None) for item in columns[i] if item.is_complete and item.s == start_symbol]) + + next_to_scan = set() + next_set = set() + columns.append(next_set) + transitives.append({}) + + ## 4) Process Tokens from delayed_matches. + # This is the core of the Earley scanner. Create an SPPF node for each Token, + # and create the symbol node in the SPPF tree. Advance the item that completed, + # and add the resulting new item to either the Earley set (for processing by the + # completer/predictor) or the to_scan buffer for the next parse step. 
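+            # Only matches that end exactly at position i+1 are consumed in this pass; matches that
+            # extend further were buffered under their own end positions in delayed_matches and are
+            # handled once the input position catches up with them.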
+ for item, start, token in delayed_matches[i+1]: + if token is not None: + token.end_line = text_line + token.end_column = text_column + 1 + token.end_pos = i + 1 + + new_item = item.advance() + label = (new_item.s, new_item.start, i) + token_node = TokenNode(token, terminals[token.type]) + new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label)) + new_item.node.add_family(new_item.s, item.rule, new_item.start, item.node, token_node) + else: + new_item = item + + if new_item.expect in self.TERMINALS: + # add (B ::= Aai+1.B, h, y) to Q' + next_to_scan.add(new_item) + else: + # add (B ::= Aa+1.B, h, y) to Ei+1 + next_set.add(new_item) + + del delayed_matches[i+1] # No longer needed, so unburden memory + + if not next_set and not delayed_matches and not next_to_scan: + considered_rules = list(sorted(to_scan, key=lambda key: key.rule.origin.name)) + raise UnexpectedCharacters(stream, i, text_line, text_column, {item.expect.name for item in to_scan}, + set(to_scan), state=frozenset(i.s for i in to_scan), + considered_rules=considered_rules + ) + + return next_to_scan + + + delayed_matches = defaultdict(list) + match = self.term_matcher + terminals = self.lexer_conf.terminals_by_name + + # Cache for nodes & tokens created in a particular parse step. + transitives = [{}] + + text_line = 1 + text_column = 1 + + ## The main Earley loop. + # Run the Prediction/Completion cycle for any Items in the current Earley set. + # Completions will be added to the SPPF tree, and predictions will be recursively + # processed down to terminals/empty nodes to be added to the scanner for the next + # step. + i = 0 + for token in stream: + self.predict_and_complete(i, to_scan, columns, transitives) + + to_scan = scan(i, to_scan) + + if token == '\n': + text_line += 1 + text_column = 1 + else: + text_column += 1 + i += 1 + + self.predict_and_complete(i, to_scan, columns, transitives) + + ## Column is now the final column in the parse. 
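+        # i now indexes the last column; whatever remains in to_scan is handed back to the caller.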
+ assert i == len(columns)-1 + return to_scan diff --git a/src/poetry/core/_vendor/lark/py.typed b/src/poetry/core/_vendor/lark/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/_vendor/lark/reconstruct.py b/src/poetry/core/_vendor/lark/reconstruct.py new file mode 100644 index 0000000..906ca81 --- /dev/null +++ b/src/poetry/core/_vendor/lark/reconstruct.py @@ -0,0 +1,106 @@ +"""Reconstruct text from a tree, based on Lark grammar""" + +from typing import List, Dict, Union, Callable, Iterable, Optional + +from .lark import Lark +from .tree import Tree, ParseTree +from .visitors import Transformer_InPlace +from .lexer import Token, PatternStr, TerminalDef +from .grammar import Terminal, NonTerminal, Symbol + +from .tree_matcher import TreeMatcher, is_discarded_terminal +from .utils import is_id_continue + +def is_iter_empty(i): + try: + _ = next(i) + return False + except StopIteration: + return True + + +class WriteTokensTransformer(Transformer_InPlace): + "Inserts discarded tokens into their correct place, according to the rules of grammar" + + tokens: Dict[str, TerminalDef] + term_subs: Dict[str, Callable[[Symbol], str]] + + def __init__(self, tokens: Dict[str, TerminalDef], term_subs: Dict[str, Callable[[Symbol], str]]) -> None: + self.tokens = tokens + self.term_subs = term_subs + + def __default__(self, data, children, meta): + if not getattr(meta, 'match_tree', False): + return Tree(data, children) + + iter_args = iter(children) + to_write = [] + for sym in meta.orig_expansion: + if is_discarded_terminal(sym): + try: + v = self.term_subs[sym.name](sym) + except KeyError: + t = self.tokens[sym.name] + if not isinstance(t.pattern, PatternStr): + raise NotImplementedError("Reconstructing regexps not supported yet: %s" % t) + + v = t.pattern.value + to_write.append(v) + else: + x = next(iter_args) + if isinstance(x, list): + to_write += x + else: + if isinstance(x, Token): + assert Terminal(x.type) == sym, x + else: + assert NonTerminal(x.data) == sym, (sym, x) + to_write.append(x) + + assert is_iter_empty(iter_args) + return to_write + + +class Reconstructor(TreeMatcher): + """ + A Reconstructor that will, given a full parse Tree, generate source code. + + Note: + The reconstructor cannot generate values from regexps. If you need to produce discarded + regexes, such as newlines, use `term_subs` and provide default values for them. 
+ + Paramters: + parser: a Lark instance + term_subs: a dictionary of [Terminal name as str] to [output text as str] + """ + + write_tokens: WriteTokensTransformer + + def __init__(self, parser: Lark, term_subs: Optional[Dict[str, Callable[[Symbol], str]]]=None) -> None: + TreeMatcher.__init__(self, parser) + + self.write_tokens = WriteTokensTransformer({t.name:t for t in self.tokens}, term_subs or {}) + + def _reconstruct(self, tree): + unreduced_tree = self.match_tree(tree, tree.data) + + res = self.write_tokens.transform(unreduced_tree) + for item in res: + if isinstance(item, Tree): + # TODO use orig_expansion.rulename to support templates + yield from self._reconstruct(item) + else: + yield item + + def reconstruct(self, tree: ParseTree, postproc: Optional[Callable[[Iterable[str]], Iterable[str]]]=None, insert_spaces: bool=True) -> str: + x = self._reconstruct(tree) + if postproc: + x = postproc(x) + y = [] + prev_item = '' + for item in x: + if insert_spaces and prev_item and item and is_id_continue(prev_item[-1]) and is_id_continue(item[0]): + y.append(' ') + y.append(item) + prev_item = item + return ''.join(y) diff --git a/src/poetry/core/_vendor/lark/tools/__init__.py b/src/poetry/core/_vendor/lark/tools/__init__.py new file mode 100644 index 0000000..391f991 --- /dev/null +++ b/src/poetry/core/_vendor/lark/tools/__init__.py @@ -0,0 +1,64 @@ +import sys +from argparse import ArgumentParser, FileType +from textwrap import indent +from logging import DEBUG, INFO, WARN, ERROR +from typing import Optional +import warnings + +from lark import Lark, logger + +lalr_argparser = ArgumentParser(add_help=False, epilog='Look at the Lark documentation for more info on the options') + +flags = [ + ('d', 'debug'), + 'keep_all_tokens', + 'regex', + 'propagate_positions', + 'maybe_placeholders', + 'use_bytes' +] + +options = ['start', 'lexer'] + +lalr_argparser.add_argument('-v', '--verbose', action='count', default=0, help="Increase Logger output level, up to three times") +lalr_argparser.add_argument('-s', '--start', action='append', default=[]) +lalr_argparser.add_argument('-l', '--lexer', default='contextual', choices=('basic', 'contextual')) +encoding: Optional[str] = 'utf-8' if sys.version_info > (3, 4) else None +lalr_argparser.add_argument('-o', '--out', type=FileType('w', encoding=encoding), default=sys.stdout, help='the output file (default=stdout)') +lalr_argparser.add_argument('grammar_file', type=FileType('r', encoding=encoding), help='A valid .lark file') + +for flag in flags: + if isinstance(flag, tuple): + options.append(flag[1]) + lalr_argparser.add_argument('-' + flag[0], '--' + flag[1], action='store_true') + elif isinstance(flag, str): + options.append(flag) + lalr_argparser.add_argument('--' + flag, action='store_true') + else: + raise NotImplementedError("flags must only contain strings or tuples of strings") + + +def build_lalr(namespace): + logger.setLevel((ERROR, WARN, INFO, DEBUG)[min(namespace.verbose, 3)]) + if len(namespace.start) == 0: + namespace.start.append('start') + kwargs = {n: getattr(namespace, n) for n in options} + return Lark(namespace.grammar_file, parser='lalr', **kwargs), namespace.out + + +def showwarning_as_comment(message, category, filename, lineno, file=None, line=None): + # Based on warnings._showwarnmsg_impl + text = warnings.formatwarning(message, category, filename, lineno, line) + text = indent(text, '# ') + if file is None: + file = sys.stderr + if file is None: + return + try: + file.write(text) + except OSError: + pass + + +def 
make_warnings_comments(): + warnings.showwarning = showwarning_as_comment diff --git a/src/poetry/core/_vendor/lark/tools/nearley.py b/src/poetry/core/_vendor/lark/tools/nearley.py new file mode 100644 index 0000000..1fc27d5 --- /dev/null +++ b/src/poetry/core/_vendor/lark/tools/nearley.py @@ -0,0 +1,202 @@ +"Converts Nearley grammars to Lark" + +import os.path +import sys +import codecs +import argparse + + +from lark import Lark, Transformer, v_args + +nearley_grammar = r""" + start: (ruledef|directive)+ + + directive: "@" NAME (STRING|NAME) + | "@" JS -> js_code + ruledef: NAME "->" expansions + | NAME REGEXP "->" expansions -> macro + expansions: expansion ("|" expansion)* + + expansion: expr+ js + + ?expr: item (":" /[+*?]/)? + + ?item: rule|string|regexp|null + | "(" expansions ")" + + rule: NAME + string: STRING + regexp: REGEXP + null: "null" + JS: /{%.*?%}/s + js: JS? + + NAME: /[a-zA-Z_$]\w*/ + COMMENT: /#[^\n]*/ + REGEXP: /\[.*?\]/ + + STRING: _STRING "i"? + + %import common.ESCAPED_STRING -> _STRING + %import common.WS + %ignore WS + %ignore COMMENT + + """ + +nearley_grammar_parser = Lark(nearley_grammar, parser='earley', lexer='basic') + +def _get_rulename(name): + name = {'_': '_ws_maybe', '__': '_ws'}.get(name, name) + return 'n_' + name.replace('$', '__DOLLAR__').lower() + +@v_args(inline=True) +class NearleyToLark(Transformer): + def __init__(self): + self._count = 0 + self.extra_rules = {} + self.extra_rules_rev = {} + self.alias_js_code = {} + + def _new_function(self, code): + name = 'alias_%d' % self._count + self._count += 1 + + self.alias_js_code[name] = code + return name + + def _extra_rule(self, rule): + if rule in self.extra_rules_rev: + return self.extra_rules_rev[rule] + + name = 'xrule_%d' % len(self.extra_rules) + assert name not in self.extra_rules + self.extra_rules[name] = rule + self.extra_rules_rev[rule] = name + return name + + def rule(self, name): + return _get_rulename(name) + + def ruledef(self, name, exps): + return '!%s: %s' % (_get_rulename(name), exps) + + def expr(self, item, op): + rule = '(%s)%s' % (item, op) + return self._extra_rule(rule) + + def regexp(self, r): + return '/%s/' % r + + def null(self): + return '' + + def string(self, s): + return self._extra_rule(s) + + def expansion(self, *x): + x, js = x[:-1], x[-1] + if js.children: + js_code ,= js.children + js_code = js_code[2:-2] + alias = '-> ' + self._new_function(js_code) + else: + alias = '' + return ' '.join(x) + alias + + def expansions(self, *x): + return '%s' % ('\n |'.join(x)) + + def start(self, *rules): + return '\n'.join(filter(None, rules)) + +def _nearley_to_lark(g, builtin_path, n2l, js_code, folder_path, includes): + rule_defs = [] + + tree = nearley_grammar_parser.parse(g) + for statement in tree.children: + if statement.data == 'directive': + directive, arg = statement.children + if directive in ('builtin', 'include'): + folder = builtin_path if directive == 'builtin' else folder_path + path = os.path.join(folder, arg[1:-1]) + if path not in includes: + includes.add(path) + with codecs.open(path, encoding='utf8') as f: + text = f.read() + rule_defs += _nearley_to_lark(text, builtin_path, n2l, js_code, os.path.abspath(os.path.dirname(path)), includes) + else: + assert False, directive + elif statement.data == 'js_code': + code ,= statement.children + code = code[2:-2] + js_code.append(code) + elif statement.data == 'macro': + pass # TODO Add support for macros! 
+ elif statement.data == 'ruledef': + rule_defs.append(n2l.transform(statement)) + else: + raise Exception("Unknown statement: %s" % statement) + + return rule_defs + + +def create_code_for_nearley_grammar(g, start, builtin_path, folder_path, es6=False): + import js2py + + emit_code = [] + def emit(x=None): + if x: + emit_code.append(x) + emit_code.append('\n') + + js_code = ['function id(x) {return x[0];}'] + n2l = NearleyToLark() + rule_defs = _nearley_to_lark(g, builtin_path, n2l, js_code, folder_path, set()) + lark_g = '\n'.join(rule_defs) + lark_g += '\n'+'\n'.join('!%s: %s' % item for item in n2l.extra_rules.items()) + + emit('from lark import Lark, Transformer') + emit() + emit('grammar = ' + repr(lark_g)) + emit() + + for alias, code in n2l.alias_js_code.items(): + js_code.append('%s = (%s);' % (alias, code)) + + if es6: + emit(js2py.translate_js6('\n'.join(js_code))) + else: + emit(js2py.translate_js('\n'.join(js_code))) + emit('class TransformNearley(Transformer):') + for alias in n2l.alias_js_code: + emit(" %s = var.get('%s').to_python()" % (alias, alias)) + emit(" __default__ = lambda self, n, c, m: c if c else None") + + emit() + emit('parser = Lark(grammar, start="n_%s", maybe_placeholders=False)' % start) + emit('def parse(text):') + emit(' return TransformNearley().transform(parser.parse(text))') + + return ''.join(emit_code) + +def main(fn, start, nearley_lib, es6=False): + with codecs.open(fn, encoding='utf8') as f: + grammar = f.read() + return create_code_for_nearley_grammar(grammar, start, os.path.join(nearley_lib, 'builtin'), os.path.abspath(os.path.dirname(fn)), es6=es6) + +def get_arg_parser(): + parser = argparse.ArgumentParser(description='Reads a Nearley grammar (with js functions), and outputs an equivalent lark parser.') + parser.add_argument('nearley_grammar', help='Path to the file containing the nearley grammar') + parser.add_argument('start_rule', help='Rule within the nearley grammar to make the base rule') + parser.add_argument('nearley_lib', help='Path to root directory of nearley codebase (used for including builtins)') + parser.add_argument('--es6', help='Enable experimental ES6 support', action='store_true') + return parser + +if __name__ == '__main__': + parser = get_arg_parser() + if len(sys.argv) == 1: + parser.print_help(sys.stderr) + sys.exit(1) + args = parser.parse_args() + print(main(fn=args.nearley_grammar, start=args.start_rule, nearley_lib=args.nearley_lib, es6=args.es6)) diff --git a/src/poetry/core/_vendor/lark/tools/serialize.py b/src/poetry/core/_vendor/lark/tools/serialize.py new file mode 100644 index 0000000..6154024 --- /dev/null +++ b/src/poetry/core/_vendor/lark/tools/serialize.py @@ -0,0 +1,34 @@ +import codecs +import sys +import json + +from lark import Lark +from lark.grammar import RuleOptions, Rule +from lark.lexer import TerminalDef +from lark.tools import lalr_argparser, build_lalr + +import argparse + +argparser = argparse.ArgumentParser(prog='python -m lark.tools.serialize', parents=[lalr_argparser], + description="Lark Serialization Tool - Stores Lark's internal state & LALR analysis as a JSON file", + epilog='Look at the Lark documentation for more info on the options') + + +def serialize(lark_inst, outfile): + data, memo = lark_inst.memo_serialize([TerminalDef, Rule]) + outfile.write('{\n') + outfile.write(' "data": %s,\n' % json.dumps(data)) + outfile.write(' "memo": %s\n' % json.dumps(memo)) + outfile.write('}\n') + + +def main(): + if len(sys.argv)==1: + argparser.print_help(sys.stderr) + sys.exit(1) + ns = 
argparser.parse_args() + serialize(*build_lalr(ns)) + + +if __name__ == '__main__': + main() diff --git a/src/poetry/core/_vendor/lark/tools/standalone.py b/src/poetry/core/_vendor/lark/tools/standalone.py new file mode 100644 index 0000000..3ae2cdb --- /dev/null +++ b/src/poetry/core/_vendor/lark/tools/standalone.py @@ -0,0 +1,194 @@ +###{standalone +# +# +# Lark Stand-alone Generator Tool +# ---------------------------------- +# Generates a stand-alone LALR(1) parser +# +# Git: https://github.com/erezsh/lark +# Author: Erez Shinan (erezshin@gmail.com) +# +# +# >>> LICENSE +# +# This tool and its generated code use a separate license from Lark, +# and are subject to the terms of the Mozilla Public License, v. 2.0. +# If a copy of the MPL was not distributed with this +# file, You can obtain one at https://mozilla.org/MPL/2.0/. +# +# If you wish to purchase a commercial license for this tool and its +# generated code, you may contact me via email or otherwise. +# +# If MPL2 is incompatible with your free or open-source project, +# contact me and we'll work it out. +# +# + +from abc import ABC, abstractmethod +from collections.abc import Sequence +from types import ModuleType +from typing import ( + TypeVar, Generic, Type, Tuple, List, Dict, Iterator, Collection, Callable, Optional, FrozenSet, Any, + Union, Iterable, IO, TYPE_CHECKING, overload, + Pattern as REPattern, ClassVar, Set, Mapping +) +###} + +import sys +import token, tokenize +import os +from os import path +from collections import defaultdict +from functools import partial +from argparse import ArgumentParser + +import lark +from lark.tools import lalr_argparser, build_lalr, make_warnings_comments + + +from lark.grammar import Rule +from lark.lexer import TerminalDef + +_dir = path.dirname(__file__) +_larkdir = path.join(_dir, path.pardir) + + +EXTRACT_STANDALONE_FILES = [ + 'tools/standalone.py', + 'exceptions.py', + 'utils.py', + 'tree.py', + 'visitors.py', + 'grammar.py', + 'lexer.py', + 'common.py', + 'parse_tree_builder.py', + 'parsers/lalr_parser.py', + 'parsers/lalr_analysis.py', + 'parser_frontends.py', + 'lark.py', + 'indenter.py', +] + +def extract_sections(lines): + section = None + text = [] + sections = defaultdict(list) + for line in lines: + if line.startswith('###'): + if line[3] == '{': + section = line[4:].strip() + elif line[3] == '}': + sections[section] += text + section = None + text = [] + else: + raise ValueError(line) + elif section: + text.append(line) + + return {name: ''.join(text) for name, text in sections.items()} + + +def strip_docstrings(line_gen): + """ Strip comments and docstrings from a file. 
+ Based on code from: https://stackoverflow.com/questions/1769332/script-to-remove-python-comments-docstrings + """ + res = [] + + prev_toktype = token.INDENT + last_lineno = -1 + last_col = 0 + + tokgen = tokenize.generate_tokens(line_gen) + for toktype, ttext, (slineno, scol), (elineno, ecol), ltext in tokgen: + if slineno > last_lineno: + last_col = 0 + if scol > last_col: + res.append(" " * (scol - last_col)) + if toktype == token.STRING and prev_toktype == token.INDENT: + # Docstring + res.append("#--") + elif toktype == tokenize.COMMENT: + # Comment + res.append("##\n") + else: + res.append(ttext) + prev_toktype = toktype + last_col = ecol + last_lineno = elineno + + return ''.join(res) + + +def gen_standalone(lark_inst, output=None, out=sys.stdout, compress=False): + if output is None: + output = partial(print, file=out) + + import pickle, zlib, base64 + def compressed_output(obj): + s = pickle.dumps(obj, pickle.HIGHEST_PROTOCOL) + c = zlib.compress(s) + output(repr(base64.b64encode(c))) + + def output_decompress(name): + output('%(name)s = pickle.loads(zlib.decompress(base64.b64decode(%(name)s)))' % locals()) + + output('# The file was automatically generated by Lark v%s' % lark.__version__) + output('__version__ = "%s"' % lark.__version__) + output() + + for i, pyfile in enumerate(EXTRACT_STANDALONE_FILES): + with open(os.path.join(_larkdir, pyfile)) as f: + code = extract_sections(f)['standalone'] + if i: # if not this file + code = strip_docstrings(partial(next, iter(code.splitlines(True)))) + output(code) + + data, m = lark_inst.memo_serialize([TerminalDef, Rule]) + output('import pickle, zlib, base64') + if compress: + output('DATA = (') + compressed_output(data) + output(')') + output_decompress('DATA') + output('MEMO = (') + compressed_output(m) + output(')') + output_decompress('MEMO') + else: + output('DATA = (') + output(data) + output(')') + output('MEMO = (') + output(m) + output(')') + + + output('Shift = 0') + output('Reduce = 1') + output("def Lark_StandAlone(**kwargs):") + output(" return Lark._load_from_dict(DATA, MEMO, **kwargs)") + + + + +def main(): + make_warnings_comments() + parser = ArgumentParser(prog="prog='python -m lark.tools.standalone'", description="Lark Stand-alone Generator Tool", + parents=[lalr_argparser], epilog='Look at the Lark documentation for more info on the options') + parser.add_argument('-c', '--compress', action='store_true', default=0, help="Enable compression") + if len(sys.argv) == 1: + parser.print_help(sys.stderr) + sys.exit(1) + ns = parser.parse_args() + + lark_inst, out = build_lalr(ns) + gen_standalone(lark_inst, out=out, compress=ns.compress) + + ns.out.close() + ns.grammar_file.close() + + +if __name__ == '__main__': + main() diff --git a/src/poetry/core/_vendor/lark/tree.py b/src/poetry/core/_vendor/lark/tree.py new file mode 100644 index 0000000..7ad620f --- /dev/null +++ b/src/poetry/core/_vendor/lark/tree.py @@ -0,0 +1,263 @@ +import sys +from copy import deepcopy + +from typing import List, Callable, Iterator, Union, Optional, Generic, TypeVar, Any, TYPE_CHECKING + +if TYPE_CHECKING: + from .lexer import TerminalDef, Token + import rich + if sys.version_info >= (3, 8): + from typing import Literal + else: + from typing_extensions import Literal + +###{standalone +from collections import OrderedDict + +class Meta: + + empty: bool + line: int + column: int + start_pos: int + end_line: int + end_column: int + end_pos: int + orig_expansion: 'List[TerminalDef]' + match_tree: bool + + def __init__(self): + self.empty = True + 
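+# Type variable for the leaves of a Tree (usually Token); a Branch is either a leaf or a nested Tree.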
+ +_Leaf_T = TypeVar("_Leaf_T") +Branch = Union[_Leaf_T, 'Tree[_Leaf_T]'] + + +class Tree(Generic[_Leaf_T]): + """The main tree class. + + Creates a new tree, and stores "data" and "children" in attributes of the same name. + Trees can be hashed and compared. + + Parameters: + data: The name of the rule or alias + children: List of matched sub-rules and terminals + meta: Line & Column numbers (if ``propagate_positions`` is enabled). + meta attributes: line, column, start_pos, end_line, end_column, end_pos + """ + + data: str + children: 'List[Branch[_Leaf_T]]' + + def __init__(self, data: str, children: 'List[Branch[_Leaf_T]]', meta: Optional[Meta]=None) -> None: + self.data = data + self.children = children + self._meta = meta + + @property + def meta(self) -> Meta: + if self._meta is None: + self._meta = Meta() + return self._meta + + def __repr__(self): + return 'Tree(%r, %r)' % (self.data, self.children) + + def _pretty_label(self): + return self.data + + def _pretty(self, level, indent_str): + if len(self.children) == 1 and not isinstance(self.children[0], Tree): + return [indent_str*level, self._pretty_label(), '\t', '%s' % (self.children[0],), '\n'] + + l = [indent_str*level, self._pretty_label(), '\n'] + for n in self.children: + if isinstance(n, Tree): + l += n._pretty(level+1, indent_str) + else: + l += [indent_str*(level+1), '%s' % (n,), '\n'] + + return l + + def pretty(self, indent_str: str=' ') -> str: + """Returns an indented string representation of the tree. + + Great for debugging. + """ + return ''.join(self._pretty(0, indent_str)) + + def __rich__(self, parent:'rich.tree.Tree'=None) -> 'rich.tree.Tree': + """Returns a tree widget for the 'rich' library. + + Example: + :: + from rich import print + from lark import Tree + + tree = Tree('root', ['node1', 'node2']) + print(tree) + """ + return self._rich(parent) + + def _rich(self, parent): + if parent: + tree = parent.add(f'[bold]{self.data}[/bold]') + else: + import rich.tree + tree = rich.tree.Tree(self.data) + + for c in self.children: + if isinstance(c, Tree): + c._rich(tree) + else: + tree.add(f'[green]{c}[/green]') + + return tree + + def __eq__(self, other): + try: + return self.data == other.data and self.children == other.children + except AttributeError: + return False + + def __ne__(self, other): + return not (self == other) + + def __hash__(self) -> int: + return hash((self.data, tuple(self.children))) + + def iter_subtrees(self) -> 'Iterator[Tree[_Leaf_T]]': + """Depth-first iteration. + + Iterates over all the subtrees, never returning to the same node twice (Lark's parse-tree is actually a DAG). + """ + queue = [self] + subtrees = OrderedDict() + for subtree in queue: + subtrees[id(subtree)] = subtree + # Reason for type ignore https://github.com/python/mypy/issues/10999 + queue += [c for c in reversed(subtree.children) # type: ignore[misc] + if isinstance(c, Tree) and id(c) not in subtrees] + + del queue + return reversed(list(subtrees.values())) + + def iter_subtrees_topdown(self): + """Breadth-first iteration. + + Iterates over all the subtrees, return nodes in order like pretty() does. 
+ """ + stack = [self] + while stack: + node = stack.pop() + if not isinstance(node, Tree): + continue + yield node + for child in reversed(node.children): + stack.append(child) + + def find_pred(self, pred: 'Callable[[Tree[_Leaf_T]], bool]') -> 'Iterator[Tree[_Leaf_T]]': + """Returns all nodes of the tree that evaluate pred(node) as true.""" + return filter(pred, self.iter_subtrees()) + + def find_data(self, data: str) -> 'Iterator[Tree[_Leaf_T]]': + """Returns all nodes of the tree whose data equals the given data.""" + return self.find_pred(lambda t: t.data == data) + +###} + + def expand_kids_by_data(self, *data_values): + """Expand (inline) children with any of the given data values. Returns True if anything changed""" + changed = False + for i in range(len(self.children)-1, -1, -1): + child = self.children[i] + if isinstance(child, Tree) and child.data in data_values: + self.children[i:i+1] = child.children + changed = True + return changed + + + def scan_values(self, pred: 'Callable[[Branch[_Leaf_T]], bool]') -> Iterator[_Leaf_T]: + """Return all values in the tree that evaluate pred(value) as true. + + This can be used to find all the tokens in the tree. + + Example: + >>> all_tokens = tree.scan_values(lambda v: isinstance(v, Token)) + """ + for c in self.children: + if isinstance(c, Tree): + for t in c.scan_values(pred): + yield t + else: + if pred(c): + yield c + + def __deepcopy__(self, memo): + return type(self)(self.data, deepcopy(self.children, memo), meta=self._meta) + + def copy(self) -> 'Tree[_Leaf_T]': + return type(self)(self.data, self.children) + + def set(self, data: str, children: 'List[Branch[_Leaf_T]]') -> None: + self.data = data + self.children = children + + +ParseTree = Tree['Token'] + + +class SlottedTree(Tree): + __slots__ = 'data', 'children', 'rule', '_meta' + + +def pydot__tree_to_png(tree: Tree, filename: str, rankdir: 'Literal["TB", "LR", "BT", "RL"]'="LR", **kwargs) -> None: + graph = pydot__tree_to_graph(tree, rankdir, **kwargs) + graph.write_png(filename) + + +def pydot__tree_to_dot(tree: Tree, filename, rankdir="LR", **kwargs): + graph = pydot__tree_to_graph(tree, rankdir, **kwargs) + graph.write(filename) + + +def pydot__tree_to_graph(tree: Tree, rankdir="LR", **kwargs): + """Creates a colorful image that represents the tree (data+children, without meta) + + Possible values for `rankdir` are "TB", "LR", "BT", "RL", corresponding to + directed graphs drawn from top to bottom, from left to right, from bottom to + top, and from right to left, respectively. + + `kwargs` can be any graph attribute (e. g. `dpi=200`). For a list of + possible attributes, see https://www.graphviz.org/doc/info/attrs.html. 
+ """ + + import pydot # type: ignore[import] + graph = pydot.Dot(graph_type='digraph', rankdir=rankdir, **kwargs) + + i = [0] + + def new_leaf(leaf): + node = pydot.Node(i[0], label=repr(leaf)) + i[0] += 1 + graph.add_node(node) + return node + + def _to_pydot(subtree): + color = hash(subtree.data) & 0xffffff + color |= 0x808080 + + subnodes = [_to_pydot(child) if isinstance(child, Tree) else new_leaf(child) + for child in subtree.children] + node = pydot.Node(i[0], style="filled", fillcolor="#%x" % color, label=subtree.data) + i[0] += 1 + graph.add_node(node) + + for subnode in subnodes: + graph.add_edge(pydot.Edge(node, subnode)) + + return node + + _to_pydot(tree) + return graph diff --git a/src/poetry/core/_vendor/lark/tree_matcher.py b/src/poetry/core/_vendor/lark/tree_matcher.py new file mode 100644 index 0000000..fdcd2bf --- /dev/null +++ b/src/poetry/core/_vendor/lark/tree_matcher.py @@ -0,0 +1,186 @@ +"""Tree matcher based on Lark grammar""" + +import re +from collections import defaultdict + +from . import Tree, Token +from .common import ParserConf +from .parsers import earley +from .grammar import Rule, Terminal, NonTerminal + + +def is_discarded_terminal(t): + return t.is_term and t.filter_out + + +class _MakeTreeMatch: + def __init__(self, name, expansion): + self.name = name + self.expansion = expansion + + def __call__(self, args): + t = Tree(self.name, args) + t.meta.match_tree = True + t.meta.orig_expansion = self.expansion + return t + + +def _best_from_group(seq, group_key, cmp_key): + d = {} + for item in seq: + key = group_key(item) + if key in d: + v1 = cmp_key(item) + v2 = cmp_key(d[key]) + if v2 > v1: + d[key] = item + else: + d[key] = item + return list(d.values()) + + +def _best_rules_from_group(rules): + rules = _best_from_group(rules, lambda r: r, lambda r: -len(r.expansion)) + rules.sort(key=lambda r: len(r.expansion)) + return rules + + +def _match(term, token): + if isinstance(token, Tree): + name, _args = parse_rulename(term.name) + return token.data == name + elif isinstance(token, Token): + return term == Terminal(token.type) + assert False, (term, token) + + +def make_recons_rule(origin, expansion, old_expansion): + return Rule(origin, expansion, alias=_MakeTreeMatch(origin.name, old_expansion)) + + +def make_recons_rule_to_term(origin, term): + return make_recons_rule(origin, [Terminal(term.name)], [term]) + + +def parse_rulename(s): + "Parse rule names that may contain a template syntax (like rule{a, b, ...})" + name, args_str = re.match(r'(\w+)(?:{(.+)})?', s).groups() + args = args_str and [a.strip() for a in args_str.split(',')] + return name, args + + + +class ChildrenLexer: + def __init__(self, children): + self.children = children + + def lex(self, parser_state): + return self.children + +class TreeMatcher: + """Match the elements of a tree node, based on an ontology + provided by a Lark grammar. + + Supports templates and inlined rules (`rule{a, b,..}` and `_rule`) + + Initiialize with an instance of Lark. + """ + + def __init__(self, parser): + # XXX TODO calling compile twice returns different results! + assert not parser.options.maybe_placeholders + # XXX TODO: we just ignore the potential existence of a postlexer + self.tokens, rules, _extra = parser.grammar.compile(parser.options.start, set()) + + self.rules_for_root = defaultdict(list) + + self.rules = list(self._build_recons_rules(rules)) + self.rules.reverse() + + # Choose the best rule from each group of {rule => [rule.alias]}, since we only really need one derivation. 
+ self.rules = _best_rules_from_group(self.rules) + + self.parser = parser + self._parser_cache = {} + + def _build_recons_rules(self, rules): + "Convert tree-parsing/construction rules to tree-matching rules" + expand1s = {r.origin for r in rules if r.options.expand1} + + aliases = defaultdict(list) + for r in rules: + if r.alias: + aliases[r.origin].append(r.alias) + + rule_names = {r.origin for r in rules} + nonterminals = {sym for sym in rule_names + if sym.name.startswith('_') or sym in expand1s or sym in aliases} + + seen = set() + for r in rules: + recons_exp = [sym if sym in nonterminals else Terminal(sym.name) + for sym in r.expansion if not is_discarded_terminal(sym)] + + # Skip self-recursive constructs + if recons_exp == [r.origin] and r.alias is None: + continue + + sym = NonTerminal(r.alias) if r.alias else r.origin + rule = make_recons_rule(sym, recons_exp, r.expansion) + + if sym in expand1s and len(recons_exp) != 1: + self.rules_for_root[sym.name].append(rule) + + if sym.name not in seen: + yield make_recons_rule_to_term(sym, sym) + seen.add(sym.name) + else: + if sym.name.startswith('_') or sym in expand1s: + yield rule + else: + self.rules_for_root[sym.name].append(rule) + + for origin, rule_aliases in aliases.items(): + for alias in rule_aliases: + yield make_recons_rule_to_term(origin, NonTerminal(alias)) + yield make_recons_rule_to_term(origin, origin) + + def match_tree(self, tree, rulename): + """Match the elements of `tree` to the symbols of rule `rulename`. + + Parameters: + tree (Tree): the tree node to match + rulename (str): The expected full rule name (including template args) + + Returns: + Tree: an unreduced tree that matches `rulename` + + Raises: + UnexpectedToken: If no match was found. + + Note: + It's the callers' responsibility match the tree recursively. + """ + if rulename: + # validate + name, _args = parse_rulename(rulename) + assert tree.data == name + else: + rulename = tree.data + + # TODO: ambiguity? + try: + parser = self._parser_cache[rulename] + except KeyError: + rules = self.rules + _best_rules_from_group(self.rules_for_root[rulename]) + + # TODO pass callbacks through dict, instead of alias? + callbacks = {rule: rule.alias for rule in rules} + conf = ParserConf(rules, callbacks, [rulename]) + parser = earley.Parser(self.parser.lexer_conf, conf, _match, resolve_ambiguity=True) + self._parser_cache[rulename] = parser + + # find a full derivation + unreduced_tree = parser.parse(ChildrenLexer(tree.children), rulename) + assert unreduced_tree.data == rulename + return unreduced_tree diff --git a/src/poetry/core/_vendor/lark/tree_templates.py b/src/poetry/core/_vendor/lark/tree_templates.py new file mode 100644 index 0000000..03eaa27 --- /dev/null +++ b/src/poetry/core/_vendor/lark/tree_templates.py @@ -0,0 +1,180 @@ +"""This module defines utilities for matching and translation tree templates. + +A tree templates is a tree that contains nodes that are template variables. + +""" + +from typing import Union, Optional, Mapping, Dict, Tuple, Iterator + +from lark import Tree, Transformer +from lark.exceptions import MissingVariableError + +Branch = Union[Tree[str], str] +TreeOrCode = Union[Tree[str], str] +MatchResult = Dict[str, Tree] +_TEMPLATE_MARKER = '$' + + +class TemplateConf: + """Template Configuration + + Allows customization for different uses of Template + + parse() must return a Tree instance. 
+ """ + + def __init__(self, parse=None): + self._parse = parse + + def test_var(self, var: Union[Tree[str], str]) -> Optional[str]: + """Given a tree node, if it is a template variable return its name. Otherwise, return None. + + This method may be overridden for customization + + Parameters: + var: Tree | str - The tree node to test + + """ + if isinstance(var, str): + return _get_template_name(var) + + if ( + isinstance(var, Tree) + and var.data == "var" + and len(var.children) > 0 + and isinstance(var.children[0], str) + ): + return _get_template_name(var.children[0]) + + return None + + def _get_tree(self, template: TreeOrCode) -> Tree[str]: + if isinstance(template, str): + assert self._parse + template = self._parse(template) + + if not isinstance(template, Tree): + raise TypeError("template parser must return a Tree instance") + + return template + + def __call__(self, template: Tree[str]) -> 'Template': + return Template(template, conf=self) + + def _match_tree_template(self, template: TreeOrCode, tree: Branch) -> Optional[MatchResult]: + """Returns dict of {var: match} if found a match, else None + """ + template_var = self.test_var(template) + if template_var: + if not isinstance(tree, Tree): + raise TypeError(f"Template variables can only match Tree instances. Not {tree!r}") + return {template_var: tree} + + if isinstance(template, str): + if template == tree: + return {} + return None + + assert isinstance(template, Tree) and isinstance(tree, Tree), f"template={template} tree={tree}" + + if template.data == tree.data and len(template.children) == len(tree.children): + res = {} + for t1, t2 in zip(template.children, tree.children): + matches = self._match_tree_template(t1, t2) + if matches is None: + return None + + res.update(matches) + + return res + + return None + + +class _ReplaceVars(Transformer[str, Tree[str]]): + def __init__(self, conf: TemplateConf, vars: Mapping[str, Tree[str]]) -> None: + super().__init__() + self._conf = conf + self._vars = vars + + def __default__(self, data, children, meta) -> Tree[str]: + tree = super().__default__(data, children, meta) + + var = self._conf.test_var(tree) + if var: + try: + return self._vars[var] + except KeyError: + raise MissingVariableError(f"No mapping for template variable ({var})") + return tree + + +class Template: + """Represents a tree template, tied to a specific configuration + + A tree template is a tree that contains nodes that are template variables. + Those variables will match any tree. + (future versions may support annotations on the variables, to allow more complex templates) + """ + + def __init__(self, tree: Tree[str], conf: TemplateConf = TemplateConf()): + self.conf = conf + self.tree = conf._get_tree(tree) + + def match(self, tree: TreeOrCode) -> Optional[MatchResult]: + """Match a tree template to a tree. + + A tree template without variables will only match ``tree`` if it is equal to the template. + + Parameters: + tree (Tree): The tree to match to the template + + Returns: + Optional[Dict[str, Tree]]: If match is found, returns a dictionary mapping + template variable names to their matching tree nodes. + If no match was found, returns None. + """ + tree = self.conf._get_tree(tree) + return self.conf._match_tree_template(self.tree, tree) + + def search(self, tree: TreeOrCode) -> Iterator[Tuple[Tree[str], MatchResult]]: + """Search for all occurances of the tree template inside ``tree``. 
+ """ + tree = self.conf._get_tree(tree) + for subtree in tree.iter_subtrees(): + res = self.match(subtree) + if res: + yield subtree, res + + def apply_vars(self, vars: Mapping[str, Tree[str]]) -> Tree[str]: + """Apply vars to the template tree + """ + return _ReplaceVars(self.conf, vars).transform(self.tree) + + +def translate(t1: Template, t2: Template, tree: TreeOrCode): + """Search tree and translate each occurrance of t1 into t2. + """ + tree = t1.conf._get_tree(tree) # ensure it's a tree, parse if necessary and possible + for subtree, vars in t1.search(tree): + res = t2.apply_vars(vars) + subtree.set(res.data, res.children) + return tree + + +class TemplateTranslator: + """Utility class for translating a collection of patterns + """ + + def __init__(self, translations: Mapping[Template, Template]): + assert all(isinstance(k, Template) and isinstance(v, Template) for k, v in translations.items()) + self.translations = translations + + def translate(self, tree: Tree[str]): + for k, v in self.translations.items(): + tree = translate(k, v, tree) + return tree + + +def _get_template_name(value: str) -> Optional[str]: + return value.lstrip(_TEMPLATE_MARKER) if value.startswith(_TEMPLATE_MARKER) else None diff --git a/src/poetry/core/_vendor/lark/utils.py b/src/poetry/core/_vendor/lark/utils.py new file mode 100644 index 0000000..6781e6f --- /dev/null +++ b/src/poetry/core/_vendor/lark/utils.py @@ -0,0 +1,339 @@ +import unicodedata +import os +from functools import reduce +from collections import deque +from typing import Callable, Iterator, List, Optional, Tuple, Type, TypeVar, Union, Dict, Any, Sequence + +###{standalone +import sys, re +import logging + +logger: logging.Logger = logging.getLogger("lark") +logger.addHandler(logging.StreamHandler()) +# Set to highest level, since we have some warnings amongst the code +# By default, we should not output any log messages +logger.setLevel(logging.CRITICAL) + + +NO_VALUE = object() + +T = TypeVar("T") + + +def classify(seq: Sequence, key: Optional[Callable] = None, value: Optional[Callable] = None) -> Dict: + d: Dict[Any, Any] = {} + for item in seq: + k = key(item) if (key is not None) else item + v = value(item) if (value is not None) else item + if k in d: + d[k].append(v) + else: + d[k] = [v] + return d + + +def _deserialize(data: Any, namespace: Dict[str, Any], memo: Dict) -> Any: + if isinstance(data, dict): + if '__type__' in data: # Object + class_ = namespace[data['__type__']] + return class_.deserialize(data, memo) + elif '@' in data: + return memo[data['@']] + return {key:_deserialize(value, namespace, memo) for key, value in data.items()} + elif isinstance(data, list): + return [_deserialize(value, namespace, memo) for value in data] + return data + + +_T = TypeVar("_T", bound="Serialize") + +class Serialize: + """Safe-ish serialization interface that doesn't rely on Pickle + + Attributes: + __serialize_fields__ (List[str]): Fields (aka attributes) to serialize. + __serialize_namespace__ (list): List of classes that deserialization is allowed to instantiate. + Should include all field types that aren't builtin types. 
+ """ + + def memo_serialize(self, types_to_memoize: List) -> Any: + memo = SerializeMemoizer(types_to_memoize) + return self.serialize(memo), memo.serialize() + + def serialize(self, memo = None) -> Dict[str, Any]: + if memo and memo.in_types(self): + return {'@': memo.memoized.get(self)} + + fields = getattr(self, '__serialize_fields__') + res = {f: _serialize(getattr(self, f), memo) for f in fields} + res['__type__'] = type(self).__name__ + if hasattr(self, '_serialize'): + self._serialize(res, memo) # type: ignore[attr-defined] + return res + + @classmethod + def deserialize(cls: Type[_T], data: Dict[str, Any], memo: Dict[int, Any]) -> _T: + namespace = getattr(cls, '__serialize_namespace__', []) + namespace = {c.__name__:c for c in namespace} + + fields = getattr(cls, '__serialize_fields__') + + if '@' in data: + return memo[data['@']] + + inst = cls.__new__(cls) + for f in fields: + try: + setattr(inst, f, _deserialize(data[f], namespace, memo)) + except KeyError as e: + raise KeyError("Cannot find key for class", cls, e) + + if hasattr(inst, '_deserialize'): + inst._deserialize() # type: ignore[attr-defined] + + return inst + + +class SerializeMemoizer(Serialize): + "A version of serialize that memoizes objects to reduce space" + + __serialize_fields__ = 'memoized', + + def __init__(self, types_to_memoize: List) -> None: + self.types_to_memoize = tuple(types_to_memoize) + self.memoized = Enumerator() + + def in_types(self, value: Serialize) -> bool: + return isinstance(value, self.types_to_memoize) + + def serialize(self) -> Dict[int, Any]: # type: ignore[override] + return _serialize(self.memoized.reversed(), None) + + @classmethod + def deserialize(cls, data: Dict[int, Any], namespace: Dict[str, Any], memo: Dict[Any, Any]) -> Dict[int, Any]: # type: ignore[override] + return _deserialize(data, namespace, memo) + + +try: + import regex + _has_regex = True +except ImportError: + _has_regex = False + +if sys.version_info >= (3, 11): + import re._parser as sre_parse + import re._constants as sre_constants +else: + import sre_parse + import sre_constants + +categ_pattern = re.compile(r'\\p{[A-Za-z_]+}') + +def get_regexp_width(expr: str) -> Union[Tuple[int, int], List[int]]: + if _has_regex: + # Since `sre_parse` cannot deal with Unicode categories of the form `\p{Mn}`, we replace these with + # a simple letter, which makes no difference as we are only trying to get the possible lengths of the regex + # match here below. + regexp_final = re.sub(categ_pattern, 'A', expr) + else: + if re.search(categ_pattern, expr): + raise ImportError('`regex` module must be installed in order to use Unicode categories.', expr) + regexp_final = expr + try: + # Fixed in next version (past 0.960) of typeshed + return [int(x) for x in sre_parse.parse(regexp_final).getwidth()] # type: ignore[attr-defined] + except sre_constants.error: + if not _has_regex: + raise ValueError(expr) + else: + # sre_parse does not support the new features in regex. 
To not completely fail in that case,
+            # we manually test for the most important info (whether the empty string is matched)
+            c = regex.compile(regexp_final)
+            if c.match('') is None:
+                # MAXREPEAT is a non-picklable subclass of int, therefore needs to be converted to enable caching
+                return 1, int(sre_constants.MAXREPEAT)
+            else:
+                return 0, int(sre_constants.MAXREPEAT)
+
+###}
+
+
+_ID_START = 'Lu', 'Ll', 'Lt', 'Lm', 'Lo', 'Mn', 'Mc', 'Pc'
+_ID_CONTINUE = _ID_START + ('Nd', 'Nl',)
+
+def _test_unicode_category(s: str, categories: Sequence[str]) -> bool:
+    if len(s) != 1:
+        return all(_test_unicode_category(char, categories) for char in s)
+    return s == '_' or unicodedata.category(s) in categories
+
+def is_id_continue(s: str) -> bool:
+    """
+    Checks if all characters in `s` are alphanumeric characters (Unicode standard, so diacritics, Indian vowels, non-Latin
+    numbers, etc. all pass). Synonymous with a Python `ID_CONTINUE` identifier. See PEP 3131 for details.
+    """
+    return _test_unicode_category(s, _ID_CONTINUE)
+
+def is_id_start(s: str) -> bool:
+    """
+    Checks if all characters in `s` are alphabetic characters (Unicode standard, so diacritics, Indian vowels, non-Latin
+    numbers, etc. all pass). Synonymous with a Python `ID_START` identifier. See PEP 3131 for details.
+    """
+    return _test_unicode_category(s, _ID_START)
+
+
+def dedup_list(l: List[T]) -> List[T]:
+    """Given a list (l), removes duplicates from the list,
+    preserving the original order of the list. Assumes that
+    the list entries are hashable."""
+    dedup = set()
+    # This returns None, but that's expected
+    return [x for x in l if not (x in dedup or dedup.add(x))]  # type: ignore[func-returns-value]
+    # 2x faster (ordered in PyPy and CPython 3.6+, guaranteed to be ordered in Python 3.7+)
+    # return list(dict.fromkeys(l))
+
+
+class Enumerator(Serialize):
+    def __init__(self) -> None:
+        self.enums: Dict[Any, int] = {}
+
+    def get(self, item) -> int:
+        if item not in self.enums:
+            self.enums[item] = len(self.enums)
+        return self.enums[item]
+
+    def __len__(self):
+        return len(self.enums)
+
+    def reversed(self) -> Dict[int, Any]:
+        r = {v: k for k, v in self.enums.items()}
+        assert len(r) == len(self.enums)
+        return r
+
+
+
+def combine_alternatives(lists):
+    """
+    Accepts a list of alternatives, and enumerates all their possible concatenations.
+ + Examples: + >>> combine_alternatives([range(2), [4,5]]) + [[0, 4], [0, 5], [1, 4], [1, 5]] + + >>> combine_alternatives(["abc", "xy", '$']) + [['a', 'x', '$'], ['a', 'y', '$'], ['b', 'x', '$'], ['b', 'y', '$'], ['c', 'x', '$'], ['c', 'y', '$']] + + >>> combine_alternatives([]) + [[]] + """ + if not lists: + return [[]] + assert all(l for l in lists), lists + init = [[x] for x in lists[0]] + return reduce(lambda a,b: [i+[j] for i in a for j in b], lists[1:], init) + + +try: + import atomicwrites + _has_atomicwrites = True +except ImportError: + _has_atomicwrites = False + +class FS: + exists = staticmethod(os.path.exists) + + @staticmethod + def open(name, mode="r", **kwargs): + if _has_atomicwrites and "w" in mode: + return atomicwrites.atomic_write(name, mode=mode, overwrite=True, **kwargs) + else: + return open(name, mode, **kwargs) + + + +def isascii(s: str) -> bool: + """ str.isascii only exists in python3.7+ """ + if sys.version_info >= (3, 7): + return s.isascii() + else: + try: + s.encode('ascii') + return True + except (UnicodeDecodeError, UnicodeEncodeError): + return False + + +class fzset(frozenset): + def __repr__(self): + return '{%s}' % ', '.join(map(repr, self)) + + +def classify_bool(seq: Sequence, pred: Callable) -> Any: + true_elems = [] + false_elems = [] + + for elem in seq: + if pred(elem): + true_elems.append(elem) + else: + false_elems.append(elem) + + return true_elems, false_elems + + +def bfs(initial: Sequence, expand: Callable) -> Iterator: + open_q = deque(list(initial)) + visited = set(open_q) + while open_q: + node = open_q.popleft() + yield node + for next_node in expand(node): + if next_node not in visited: + visited.add(next_node) + open_q.append(next_node) + +def bfs_all_unique(initial, expand): + "bfs, but doesn't keep track of visited (aka seen), because there can be no repetitions" + open_q = deque(list(initial)) + while open_q: + node = open_q.popleft() + yield node + open_q += expand(node) + + +def _serialize(value: Any, memo: Optional[SerializeMemoizer]) -> Any: + if isinstance(value, Serialize): + return value.serialize(memo) + elif isinstance(value, list): + return [_serialize(elem, memo) for elem in value] + elif isinstance(value, frozenset): + return list(value) # TODO reversible? + elif isinstance(value, dict): + return {key:_serialize(elem, memo) for key, elem in value.items()} + # assert value is None or isinstance(value, (int, float, str, tuple)), value + return value + + + + +def small_factors(n: int, max_factor: int) -> List[Tuple[int, int]]: + """ + Splits n up into smaller factors and summands <= max_factor. + Returns a list of [(a, b), ...] 
+ so that the following code returns n: + + n = 1 + for a, b in values: + n = n * a + b + + Currently, we also keep a + b <= max_factor, but that might change + """ + assert n >= 0 + assert max_factor > 2 + if n <= max_factor: + return [(n, 0)] + + for a in range(max_factor, 1, -1): + r, b = divmod(n, a) + if a + b <= max_factor: + return small_factors(r, max_factor) + [(a, b)] + assert False, "Failed to factorize %s" % n diff --git a/src/poetry/core/_vendor/lark/visitors.py b/src/poetry/core/_vendor/lark/visitors.py new file mode 100644 index 0000000..932fbee --- /dev/null +++ b/src/poetry/core/_vendor/lark/visitors.py @@ -0,0 +1,587 @@ +from typing import TypeVar, Tuple, List, Callable, Generic, Type, Union, Optional, Any, cast +from abc import ABC + +from .utils import combine_alternatives +from .tree import Tree, Branch +from .exceptions import VisitError, GrammarError +from .lexer import Token + +###{standalone +from functools import wraps, update_wrapper +from inspect import getmembers, getmro + +_Return_T = TypeVar('_Return_T') +_Return_V = TypeVar('_Return_V') +_Leaf_T = TypeVar('_Leaf_T') +_Leaf_U = TypeVar('_Leaf_U') +_R = TypeVar('_R') +_FUNC = Callable[..., _Return_T] +_DECORATED = Union[_FUNC, type] + +class _DiscardType: + """When the Discard value is returned from a transformer callback, + that node is discarded and won't appear in the parent. + + Note: + This feature is disabled when the transformer is provided to Lark + using the ``transformer`` keyword (aka Tree-less LALR mode). + + Example: + :: + + class T(Transformer): + def ignore_tree(self, children): + return Discard + + def IGNORE_TOKEN(self, token): + return Discard + """ + + def __repr__(self): + return "lark.visitors.Discard" + +Discard = _DiscardType() + +# Transformers + +class _Decoratable: + "Provides support for decorating methods with @v_args" + + @classmethod + def _apply_v_args(cls, visit_wrapper): + mro = getmro(cls) + assert mro[0] is cls + libmembers = {name for _cls in mro[1:] for name, _ in getmembers(_cls)} + for name, value in getmembers(cls): + + # Make sure the function isn't inherited (unless it's overwritten) + if name.startswith('_') or (name in libmembers and name not in cls.__dict__): + continue + if not callable(value): + continue + + # Skip if v_args already applied (at the function level) + if isinstance(cls.__dict__[name], _VArgsWrapper): + continue + + setattr(cls, name, _VArgsWrapper(cls.__dict__[name], visit_wrapper)) + return cls + + def __class_getitem__(cls, _): + return cls + + +class Transformer(_Decoratable, ABC, Generic[_Leaf_T, _Return_T]): + """Transformers work bottom-up (or depth-first), starting with visiting the leaves and working + their way up until ending at the root of the tree. + + For each node visited, the transformer will call the appropriate method (callbacks), according to the + node's ``data``, and use the returned value to replace the node, thereby creating a new tree structure. + + Transformers can be used to implement map & reduce patterns. Because nodes are reduced from leaf to root, + at any point the callbacks may assume the children have already been transformed (if applicable). + + If the transformer cannot find a method with the right name, it will instead call ``__default__``, which by + default creates a copy of the node. + + To discard a node, return Discard (``lark.visitors.Discard``). + + ``Transformer`` can do anything ``Visitor`` can do, but because it reconstructs the tree, + it is slightly less efficient. 
+ + A transformer without methods essentially performs a non-memoized partial deepcopy. + + All these classes implement the transformer interface: + + - ``Transformer`` - Recursively transforms the tree. This is the one you probably want. + - ``Transformer_InPlace`` - Non-recursive. Changes the tree in-place instead of returning new instances + - ``Transformer_InPlaceRecursive`` - Recursive. Changes the tree in-place instead of returning new instances + + Parameters: + visit_tokens (bool, optional): Should the transformer visit tokens in addition to rules. + Setting this to ``False`` is slightly faster. Defaults to ``True``. + (For processing ignored tokens, use the ``lexer_callbacks`` options) + + """ + __visit_tokens__ = True # For backwards compatibility + + def __init__(self, visit_tokens: bool=True) -> None: + self.__visit_tokens__ = visit_tokens + + def _call_userfunc(self, tree, new_children=None): + # Assumes tree is already transformed + children = new_children if new_children is not None else tree.children + try: + f = getattr(self, tree.data) + except AttributeError: + return self.__default__(tree.data, children, tree.meta) + else: + try: + wrapper = getattr(f, 'visit_wrapper', None) + if wrapper is not None: + return f.visit_wrapper(f, tree.data, children, tree.meta) + else: + return f(children) + except GrammarError: + raise + except Exception as e: + raise VisitError(tree.data, tree, e) + + def _call_userfunc_token(self, token): + try: + f = getattr(self, token.type) + except AttributeError: + return self.__default_token__(token) + else: + try: + return f(token) + except GrammarError: + raise + except Exception as e: + raise VisitError(token.type, token, e) + + def _transform_children(self, children): + for c in children: + if isinstance(c, Tree): + res = self._transform_tree(c) + elif self.__visit_tokens__ and isinstance(c, Token): + res = self._call_userfunc_token(c) + else: + res = c + + if res is not Discard: + yield res + + def _transform_tree(self, tree): + children = list(self._transform_children(tree.children)) + return self._call_userfunc(tree, children) + + def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: + "Transform the given tree, and return the final result" + return self._transform_tree(tree) + + def __mul__( + self: 'Transformer[_Leaf_T, Tree[_Leaf_U]]', + other: 'Union[Transformer[_Leaf_U, _Return_V], TransformerChain[_Leaf_U, _Return_V,]]' + ) -> 'TransformerChain[_Leaf_T, _Return_V]': + """Chain two transformers together, returning a new transformer. + """ + return TransformerChain(self, other) + + def __default__(self, data, children, meta): + """Default function that is called if there is no attribute matching ``data`` + + Can be overridden. Defaults to creating a new copy of the tree node (i.e. ``return Tree(data, children, meta)``) + """ + return Tree(data, children, meta) + + def __default_token__(self, token): + """Default function that is called if there is no attribute matching ``token.type`` + + Can be overridden. Defaults to returning the token as-is. + """ + return token + + +def merge_transformers(base_transformer=None, **transformers_to_merge): + """Merge a collection of transformers into the base_transformer, each into its own 'namespace'. + + When called, it will collect the methods from each transformer, and assign them to base_transformer, + with their name prefixed with the given keyword, as ``prefix__methodname``. 
+ + This function is especially useful for processing grammars that import other grammars, + thereby creating some of their rules in a 'namespace'. (i.e with a consistent name prefix). + In this case, the key for the transformer should match the name of the imported grammar. + + Parameters: + base_transformer (Transformer, optional): The transformer that all other transformers will be added to. + **transformers_to_merge: Keyword arguments, in the form of ``name_prefix = transformer``. + + Raises: + AttributeError: In case of a name collision in the merged methods + + Example: + :: + + class TBase(Transformer): + def start(self, children): + return children[0] + 'bar' + + class TImportedGrammar(Transformer): + def foo(self, children): + return "foo" + + composed_transformer = merge_transformers(TBase(), imported=TImportedGrammar()) + + t = Tree('start', [ Tree('imported__foo', []) ]) + + assert composed_transformer.transform(t) == 'foobar' + + """ + if base_transformer is None: + base_transformer = Transformer() + for prefix, transformer in transformers_to_merge.items(): + for method_name in dir(transformer): + method = getattr(transformer, method_name) + if not callable(method): + continue + if method_name.startswith("_") or method_name == "transform": + continue + prefixed_method = prefix + "__" + method_name + if hasattr(base_transformer, prefixed_method): + raise AttributeError("Cannot merge: method '%s' appears more than once" % prefixed_method) + + setattr(base_transformer, prefixed_method, method) + + return base_transformer + + +class InlineTransformer(Transformer): # XXX Deprecated + def _call_userfunc(self, tree, new_children=None): + # Assumes tree is already transformed + children = new_children if new_children is not None else tree.children + try: + f = getattr(self, tree.data) + except AttributeError: + return self.__default__(tree.data, children, tree.meta) + else: + return f(*children) + + +class TransformerChain(Generic[_Leaf_T, _Return_T]): + + transformers: 'Tuple[Union[Transformer, TransformerChain], ...]' + + def __init__(self, *transformers: 'Union[Transformer, TransformerChain]') -> None: + self.transformers = transformers + + def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: + for t in self.transformers: + tree = t.transform(tree) + return cast(_Return_T, tree) + + def __mul__( + self: 'TransformerChain[_Leaf_T, Tree[_Leaf_U]]', + other: 'Union[Transformer[_Leaf_U, _Return_V], TransformerChain[_Leaf_U, _Return_V]]' + ) -> 'TransformerChain[_Leaf_T, _Return_V]': + return TransformerChain(*self.transformers + (other,)) + + +class Transformer_InPlace(Transformer): + """Same as Transformer, but non-recursive, and changes the tree in-place instead of returning new instances + + Useful for huge trees. Conservative in memory. + """ + def _transform_tree(self, tree): # Cancel recursion + return self._call_userfunc(tree) + + def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: + for subtree in tree.iter_subtrees(): + subtree.children = list(self._transform_children(subtree.children)) + + return self._transform_tree(tree) + + +class Transformer_NonRecursive(Transformer): + """Same as Transformer but non-recursive. + + Like Transformer, it doesn't change the original tree. + + Useful for huge trees. 
+ """ + + def transform(self, tree: Tree[_Leaf_T]) -> _Return_T: + # Tree to postfix + rev_postfix = [] + q: List[Branch[_Leaf_T]] = [tree] + while q: + t = q.pop() + rev_postfix.append(t) + if isinstance(t, Tree): + q += t.children + + # Postfix to tree + stack: List = [] + for x in reversed(rev_postfix): + if isinstance(x, Tree): + size = len(x.children) + if size: + args = stack[-size:] + del stack[-size:] + else: + args = [] + + res = self._call_userfunc(x, args) + if res is not Discard: + stack.append(res) + + elif self.__visit_tokens__ and isinstance(x, Token): + res = self._call_userfunc_token(x) + if res is not Discard: + stack.append(res) + else: + stack.append(x) + + result, = stack # We should have only one tree remaining + # There are no guarantees on the type of the value produced by calling a user func for a + # child will produce. This means type system can't statically know that the final result is + # _Return_T. As a result a cast is required. + return cast(_Return_T, result) + + +class Transformer_InPlaceRecursive(Transformer): + "Same as Transformer, recursive, but changes the tree in-place instead of returning new instances" + def _transform_tree(self, tree): + tree.children = list(self._transform_children(tree.children)) + return self._call_userfunc(tree) + + +# Visitors + +class VisitorBase: + def _call_userfunc(self, tree): + return getattr(self, tree.data, self.__default__)(tree) + + def __default__(self, tree): + """Default function that is called if there is no attribute matching ``tree.data`` + + Can be overridden. Defaults to doing nothing. + """ + return tree + + def __class_getitem__(cls, _): + return cls + + +class Visitor(VisitorBase, ABC, Generic[_Leaf_T]): + """Tree visitor, non-recursive (can handle huge trees). + + Visiting a node calls its methods (provided by the user via inheritance) according to ``tree.data`` + """ + + def visit(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: + "Visits the tree, starting with the leaves and finally the root (bottom-up)" + for subtree in tree.iter_subtrees(): + self._call_userfunc(subtree) + return tree + + def visit_topdown(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: + "Visit the tree, starting at the root, and ending at the leaves (top-down)" + for subtree in tree.iter_subtrees_topdown(): + self._call_userfunc(subtree) + return tree + + +class Visitor_Recursive(VisitorBase, Generic[_Leaf_T]): + """Bottom-up visitor, recursive. + + Visiting a node calls its methods (provided by the user via inheritance) according to ``tree.data`` + + Slightly faster than the non-recursive version. + """ + + def visit(self, tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: + "Visits the tree, starting with the leaves and finally the root (bottom-up)" + for child in tree.children: + if isinstance(child, Tree): + self.visit(child) + + self._call_userfunc(tree) + return tree + + def visit_topdown(self,tree: Tree[_Leaf_T]) -> Tree[_Leaf_T]: + "Visit the tree, starting at the root, and ending at the leaves (top-down)" + self._call_userfunc(tree) + + for child in tree.children: + if isinstance(child, Tree): + self.visit_topdown(child) + + return tree + + +class Interpreter(_Decoratable, ABC, Generic[_Leaf_T, _Return_T]): + """Interpreter walks the tree starting at the root. + + Visits the tree, starting with the root and finally the leaves (top-down) + + For each tree node, it calls its methods (provided by user via inheritance) according to ``tree.data``. 
+ + Unlike ``Transformer`` and ``Visitor``, the Interpreter doesn't automatically visit its sub-branches. + The user has to explicitly call ``visit``, ``visit_children``, or use the ``@visit_children_decor``. + This allows the user to implement branching and loops. + """ + + def visit(self, tree: Tree[_Leaf_T]) -> _Return_T: + # There are no guarantees on the type of the value produced by calling a user func for a + # child will produce. So only annotate the public method and use an internal method when + # visiting child trees. + return self._visit_tree(tree) + + def _visit_tree(self, tree: Tree[_Leaf_T]): + f = getattr(self, tree.data) + wrapper = getattr(f, 'visit_wrapper', None) + if wrapper is not None: + return f.visit_wrapper(f, tree.data, tree.children, tree.meta) + else: + return f(tree) + + def visit_children(self, tree: Tree[_Leaf_T]) -> List: + return [self._visit_tree(child) if isinstance(child, Tree) else child + for child in tree.children] + + def __getattr__(self, name): + return self.__default__ + + def __default__(self, tree): + return self.visit_children(tree) + + +_InterMethod = Callable[[Type[Interpreter], _Return_T], _R] + +def visit_children_decor(func: _InterMethod) -> _InterMethod: + "See Interpreter" + @wraps(func) + def inner(cls, tree): + values = cls.visit_children(tree) + return func(cls, values) + return inner + +# Decorators + +def _apply_v_args(obj, visit_wrapper): + try: + _apply = obj._apply_v_args + except AttributeError: + return _VArgsWrapper(obj, visit_wrapper) + else: + return _apply(visit_wrapper) + + +class _VArgsWrapper: + """ + A wrapper around a Callable. It delegates `__call__` to the Callable. + If the Callable has a `__get__`, that is also delegate and the resulting function is wrapped. + Otherwise, we use the original function mirroring the behaviour without a __get__. + We also have the visit_wrapper attribute to be used by Transformers. + """ + base_func: Callable + + def __init__(self, func: Callable, visit_wrapper: Callable[[Callable, str, list, Any], Any]): + if isinstance(func, _VArgsWrapper): + func = func.base_func + # https://github.com/python/mypy/issues/708 + self.base_func = func # type: ignore[assignment] + self.visit_wrapper = visit_wrapper + update_wrapper(self, func) + + def __call__(self, *args, **kwargs): + return self.base_func(*args, **kwargs) + + def __get__(self, instance, owner=None): + try: + # Use the __get__ attribute of the type instead of the instance + # to fully mirror the behavior of getattr + g = type(self.base_func).__get__ + except AttributeError: + return self + else: + return _VArgsWrapper(g(self.base_func, instance, owner), self.visit_wrapper) + + def __set_name__(self, owner, name): + try: + f = type(self.base_func).__set_name__ + except AttributeError: + return + else: + f(self.base_func, owner, name) + + +def _vargs_inline(f, _data, children, _meta): + return f(*children) +def _vargs_meta_inline(f, _data, children, meta): + return f(meta, *children) +def _vargs_meta(f, _data, children, meta): + return f(meta, children) +def _vargs_tree(f, data, children, meta): + return f(Tree(data, children, meta)) + + +def v_args(inline: bool = False, meta: bool = False, tree: bool = False, wrapper: Optional[Callable] = None) -> Callable[[_DECORATED], _DECORATED]: + """A convenience decorator factory for modifying the behavior of user-supplied visitor methods. + + By default, callback methods of transformers/visitors accept one argument - a list of the node's children. + + ``v_args`` can modify this behavior. 
When used on a transformer/visitor class definition, + it applies to all the callback methods inside it. + + ``v_args`` can be applied to a single method, or to an entire class. When applied to both, + the options given to the method take precedence. + + Parameters: + inline (bool, optional): Children are provided as ``*args`` instead of a list argument (not recommended for very long lists). + meta (bool, optional): Provides two arguments: ``children`` and ``meta`` (instead of just the first) + tree (bool, optional): Provides the entire tree as the argument, instead of the children. + wrapper (function, optional): Provide a function to decorate all methods. + + Example: + :: + + @v_args(inline=True) + class SolveArith(Transformer): + def add(self, left, right): + return left + right + + + class ReverseNotation(Transformer_InPlace): + @v_args(tree=True) + def tree_node(self, tree): + tree.children = tree.children[::-1] + """ + if tree and (meta or inline): + raise ValueError("Visitor functions cannot combine 'tree' with 'meta' or 'inline'.") + + func = None + if meta: + if inline: + func = _vargs_meta_inline + else: + func = _vargs_meta + elif inline: + func = _vargs_inline + elif tree: + func = _vargs_tree + + if wrapper is not None: + if func is not None: + raise ValueError("Cannot use 'wrapper' along with 'tree', 'meta' or 'inline'.") + func = wrapper + + def _visitor_args_dec(obj): + return _apply_v_args(obj, func) + return _visitor_args_dec + + +###} + + +# --- Visitor Utilities --- + +class CollapseAmbiguities(Transformer): + """ + Transforms a tree that contains any number of _ambig nodes into a list of trees, + each one containing an unambiguous tree. + + The length of the resulting list is the product of the length of all _ambig nodes. + + Warning: This may quickly explode for highly ambiguous trees. + + """ + def _ambig(self, options): + return sum(options, []) + + def __default__(self, data, children_lists, meta): + return [Tree(data, children, meta) for children in combine_alternatives(children_lists)] + + def __default_token__(self, t): + return [t] diff --git a/src/poetry/core/_vendor/packaging/LICENSE b/src/poetry/core/_vendor/packaging/LICENSE new file mode 100644 index 0000000..6f62d44 --- /dev/null +++ b/src/poetry/core/_vendor/packaging/LICENSE @@ -0,0 +1,3 @@ +This software is made available under the terms of *either* of the licenses +found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made +under the terms of *both* these licenses. diff --git a/src/poetry/core/_vendor/packaging/LICENSE.APACHE b/src/poetry/core/_vendor/packaging/LICENSE.APACHE new file mode 100644 index 0000000..f433b1a --- /dev/null +++ b/src/poetry/core/_vendor/packaging/LICENSE.APACHE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/src/poetry/core/_vendor/packaging/LICENSE.BSD b/src/poetry/core/_vendor/packaging/LICENSE.BSD new file mode 100644 index 0000000..42ce7b7 --- /dev/null +++ b/src/poetry/core/_vendor/packaging/LICENSE.BSD @@ -0,0 +1,23 @@ +Copyright (c) Donald Stufft and individual contributors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/src/poetry/core/_vendor/packaging/__about__.py b/src/poetry/core/_vendor/packaging/__about__.py new file mode 100644 index 0000000..3551bc2 --- /dev/null +++ b/src/poetry/core/_vendor/packaging/__about__.py @@ -0,0 +1,26 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +__all__ = [ + "__title__", + "__summary__", + "__uri__", + "__version__", + "__author__", + "__email__", + "__license__", + "__copyright__", +] + +__title__ = "packaging" +__summary__ = "Core utilities for Python packages" +__uri__ = "https://github.com/pypa/packaging" + +__version__ = "21.3" + +__author__ = "Donald Stufft and individual contributors" +__email__ = "donald@stufft.io" + +__license__ = "BSD-2-Clause or Apache-2.0" +__copyright__ = "2014-2019 %s" % __author__ diff --git a/src/poetry/core/_vendor/packaging/__init__.py b/src/poetry/core/_vendor/packaging/__init__.py new file mode 100644 index 0000000..3c50c5d --- /dev/null +++ b/src/poetry/core/_vendor/packaging/__init__.py @@ -0,0 +1,25 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +from .__about__ import ( + __author__, + __copyright__, + __email__, + __license__, + __summary__, + __title__, + __uri__, + __version__, +) + +__all__ = [ + "__title__", + "__summary__", + "__uri__", + "__version__", + "__author__", + "__email__", + "__license__", + "__copyright__", +] diff --git a/src/poetry/core/_vendor/packaging/_manylinux.py b/src/poetry/core/_vendor/packaging/_manylinux.py new file mode 100644 index 0000000..4c379aa --- /dev/null +++ b/src/poetry/core/_vendor/packaging/_manylinux.py @@ -0,0 +1,301 @@ +import collections +import functools +import os +import re +import struct +import sys +import warnings +from typing import IO, Dict, Iterator, NamedTuple, Optional, Tuple + + +# Python does not provide platform information at sufficient granularity to +# identify the architecture of the running executable in some cases, so we +# determine it dynamically by reading the information from the running +# process. This only applies on Linux, which uses the ELF format. +class _ELFFileHeader: + # https://en.wikipedia.org/wiki/Executable_and_Linkable_Format#File_header + class _InvalidELFFileHeader(ValueError): + """ + An invalid ELF file header was found. + """ + + ELF_MAGIC_NUMBER = 0x7F454C46 + ELFCLASS32 = 1 + ELFCLASS64 = 2 + ELFDATA2LSB = 1 + ELFDATA2MSB = 2 + EM_386 = 3 + EM_S390 = 22 + EM_ARM = 40 + EM_X86_64 = 62 + EF_ARM_ABIMASK = 0xFF000000 + EF_ARM_ABI_VER5 = 0x05000000 + EF_ARM_ABI_FLOAT_HARD = 0x00000400 + + def __init__(self, file: IO[bytes]) -> None: + def unpack(fmt: str) -> int: + try: + data = file.read(struct.calcsize(fmt)) + result: Tuple[int, ...] 
= struct.unpack(fmt, data) + except struct.error: + raise _ELFFileHeader._InvalidELFFileHeader() + return result[0] + + self.e_ident_magic = unpack(">I") + if self.e_ident_magic != self.ELF_MAGIC_NUMBER: + raise _ELFFileHeader._InvalidELFFileHeader() + self.e_ident_class = unpack("B") + if self.e_ident_class not in {self.ELFCLASS32, self.ELFCLASS64}: + raise _ELFFileHeader._InvalidELFFileHeader() + self.e_ident_data = unpack("B") + if self.e_ident_data not in {self.ELFDATA2LSB, self.ELFDATA2MSB}: + raise _ELFFileHeader._InvalidELFFileHeader() + self.e_ident_version = unpack("B") + self.e_ident_osabi = unpack("B") + self.e_ident_abiversion = unpack("B") + self.e_ident_pad = file.read(7) + format_h = "H" + format_i = "I" + format_q = "Q" + format_p = format_i if self.e_ident_class == self.ELFCLASS32 else format_q + self.e_type = unpack(format_h) + self.e_machine = unpack(format_h) + self.e_version = unpack(format_i) + self.e_entry = unpack(format_p) + self.e_phoff = unpack(format_p) + self.e_shoff = unpack(format_p) + self.e_flags = unpack(format_i) + self.e_ehsize = unpack(format_h) + self.e_phentsize = unpack(format_h) + self.e_phnum = unpack(format_h) + self.e_shentsize = unpack(format_h) + self.e_shnum = unpack(format_h) + self.e_shstrndx = unpack(format_h) + + +def _get_elf_header() -> Optional[_ELFFileHeader]: + try: + with open(sys.executable, "rb") as f: + elf_header = _ELFFileHeader(f) + except (OSError, TypeError, _ELFFileHeader._InvalidELFFileHeader): + return None + return elf_header + + +def _is_linux_armhf() -> bool: + # hard-float ABI can be detected from the ELF header of the running + # process + # https://static.docs.arm.com/ihi0044/g/aaelf32.pdf + elf_header = _get_elf_header() + if elf_header is None: + return False + result = elf_header.e_ident_class == elf_header.ELFCLASS32 + result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB + result &= elf_header.e_machine == elf_header.EM_ARM + result &= ( + elf_header.e_flags & elf_header.EF_ARM_ABIMASK + ) == elf_header.EF_ARM_ABI_VER5 + result &= ( + elf_header.e_flags & elf_header.EF_ARM_ABI_FLOAT_HARD + ) == elf_header.EF_ARM_ABI_FLOAT_HARD + return result + + +def _is_linux_i686() -> bool: + elf_header = _get_elf_header() + if elf_header is None: + return False + result = elf_header.e_ident_class == elf_header.ELFCLASS32 + result &= elf_header.e_ident_data == elf_header.ELFDATA2LSB + result &= elf_header.e_machine == elf_header.EM_386 + return result + + +def _have_compatible_abi(arch: str) -> bool: + if arch == "armv7l": + return _is_linux_armhf() + if arch == "i686": + return _is_linux_i686() + return arch in {"x86_64", "aarch64", "ppc64", "ppc64le", "s390x"} + + +# If glibc ever changes its major version, we need to know what the last +# minor version was, so we can build the complete list of all versions. +# For now, guess what the highest minor version might be, assume it will +# be 50 for testing. Once this actually happens, update the dictionary +# with the actual value. +_LAST_GLIBC_MINOR: Dict[int, int] = collections.defaultdict(lambda: 50) + + +class _GLibCVersion(NamedTuple): + major: int + minor: int + + +def _glibc_version_string_confstr() -> Optional[str]: + """ + Primary implementation of glibc_version_string using os.confstr. + """ + # os.confstr is quite a bit faster than ctypes.DLL. It's also less likely + # to be broken or missing. This strategy is used in the standard library + # platform module. 
+ # https://github.com/python/cpython/blob/fcf1d003bf4f0100c/Lib/platform.py#L175-L183 + try: + # os.confstr("CS_GNU_LIBC_VERSION") returns a string like "glibc 2.17". + version_string = os.confstr("CS_GNU_LIBC_VERSION") + assert version_string is not None + _, version = version_string.split() + except (AssertionError, AttributeError, OSError, ValueError): + # os.confstr() or CS_GNU_LIBC_VERSION not available (or a bad value)... + return None + return version + + +def _glibc_version_string_ctypes() -> Optional[str]: + """ + Fallback implementation of glibc_version_string using ctypes. + """ + try: + import ctypes + except ImportError: + return None + + # ctypes.CDLL(None) internally calls dlopen(NULL), and as the dlopen + # manpage says, "If filename is NULL, then the returned handle is for the + # main program". This way we can let the linker do the work to figure out + # which libc our process is actually using. + # + # We must also handle the special case where the executable is not a + # dynamically linked executable. This can occur when using musl libc, + # for example. In this situation, dlopen() will error, leading to an + # OSError. Interestingly, at least in the case of musl, there is no + # errno set on the OSError. The single string argument used to construct + # OSError comes from libc itself and is therefore not portable to + # hard code here. In any case, failure to call dlopen() means we + # can proceed, so we bail on our attempt. + try: + process_namespace = ctypes.CDLL(None) + except OSError: + return None + + try: + gnu_get_libc_version = process_namespace.gnu_get_libc_version + except AttributeError: + # Symbol doesn't exist -> therefore, we are not linked to + # glibc. + return None + + # Call gnu_get_libc_version, which returns a string like "2.5" + gnu_get_libc_version.restype = ctypes.c_char_p + version_str: str = gnu_get_libc_version() + # py2 / py3 compatibility: + if not isinstance(version_str, str): + version_str = version_str.decode("ascii") + + return version_str + + +def _glibc_version_string() -> Optional[str]: + """Returns glibc version string, or None if not using glibc.""" + return _glibc_version_string_confstr() or _glibc_version_string_ctypes() + + +def _parse_glibc_version(version_str: str) -> Tuple[int, int]: + """Parse glibc version. + + We use a regexp instead of str.split because we want to discard any + random junk that might come after the minor version -- this might happen + in patched/forked versions of glibc (e.g. Linaro's version of glibc + uses version strings like "2.20-2014.11"). See gh-3588. + """ + m = re.match(r"(?P[0-9]+)\.(?P[0-9]+)", version_str) + if not m: + warnings.warn( + "Expected glibc version with 2 components major.minor," + " got: %s" % version_str, + RuntimeWarning, + ) + return -1, -1 + return int(m.group("major")), int(m.group("minor")) + + +@functools.lru_cache() +def _get_glibc_version() -> Tuple[int, int]: + version_str = _glibc_version_string() + if version_str is None: + return (-1, -1) + return _parse_glibc_version(version_str) + + +# From PEP 513, PEP 600 +def _is_compatible(name: str, arch: str, version: _GLibCVersion) -> bool: + sys_glibc = _get_glibc_version() + if sys_glibc < version: + return False + # Check for presence of _manylinux module. 
+ try: + import _manylinux # noqa + except ImportError: + return True + if hasattr(_manylinux, "manylinux_compatible"): + result = _manylinux.manylinux_compatible(version[0], version[1], arch) + if result is not None: + return bool(result) + return True + if version == _GLibCVersion(2, 5): + if hasattr(_manylinux, "manylinux1_compatible"): + return bool(_manylinux.manylinux1_compatible) + if version == _GLibCVersion(2, 12): + if hasattr(_manylinux, "manylinux2010_compatible"): + return bool(_manylinux.manylinux2010_compatible) + if version == _GLibCVersion(2, 17): + if hasattr(_manylinux, "manylinux2014_compatible"): + return bool(_manylinux.manylinux2014_compatible) + return True + + +_LEGACY_MANYLINUX_MAP = { + # CentOS 7 w/ glibc 2.17 (PEP 599) + (2, 17): "manylinux2014", + # CentOS 6 w/ glibc 2.12 (PEP 571) + (2, 12): "manylinux2010", + # CentOS 5 w/ glibc 2.5 (PEP 513) + (2, 5): "manylinux1", +} + + +def platform_tags(linux: str, arch: str) -> Iterator[str]: + if not _have_compatible_abi(arch): + return + # Oldest glibc to be supported regardless of architecture is (2, 17). + too_old_glibc2 = _GLibCVersion(2, 16) + if arch in {"x86_64", "i686"}: + # On x86/i686 also oldest glibc to be supported is (2, 5). + too_old_glibc2 = _GLibCVersion(2, 4) + current_glibc = _GLibCVersion(*_get_glibc_version()) + glibc_max_list = [current_glibc] + # We can assume compatibility across glibc major versions. + # https://sourceware.org/bugzilla/show_bug.cgi?id=24636 + # + # Build a list of maximum glibc versions so that we can + # output the canonical list of all glibc from current_glibc + # down to too_old_glibc2, including all intermediary versions. + for glibc_major in range(current_glibc.major - 1, 1, -1): + glibc_minor = _LAST_GLIBC_MINOR[glibc_major] + glibc_max_list.append(_GLibCVersion(glibc_major, glibc_minor)) + for glibc_max in glibc_max_list: + if glibc_max.major == too_old_glibc2.major: + min_minor = too_old_glibc2.minor + else: + # For other glibc major versions oldest supported is (x, 0). + min_minor = -1 + for glibc_minor in range(glibc_max.minor, min_minor, -1): + glibc_version = _GLibCVersion(glibc_max.major, glibc_minor) + tag = "manylinux_{}_{}".format(*glibc_version) + if _is_compatible(tag, arch, glibc_version): + yield linux.replace("linux", tag) + # Handle the legacy manylinux1, manylinux2010, manylinux2014 tags. + if glibc_version in _LEGACY_MANYLINUX_MAP: + legacy_tag = _LEGACY_MANYLINUX_MAP[glibc_version] + if _is_compatible(legacy_tag, arch, glibc_version): + yield linux.replace("linux", legacy_tag) diff --git a/src/poetry/core/_vendor/packaging/_musllinux.py b/src/poetry/core/_vendor/packaging/_musllinux.py new file mode 100644 index 0000000..8ac3059 --- /dev/null +++ b/src/poetry/core/_vendor/packaging/_musllinux.py @@ -0,0 +1,136 @@ +"""PEP 656 support. + +This module implements logic to detect if the currently running Python is +linked against musl, and what musl version is used. +""" + +import contextlib +import functools +import operator +import os +import re +import struct +import subprocess +import sys +from typing import IO, Iterator, NamedTuple, Optional, Tuple + + +def _read_unpacked(f: IO[bytes], fmt: str) -> Tuple[int, ...]: + return struct.unpack(fmt, f.read(struct.calcsize(fmt))) + + +def _parse_ld_musl_from_elf(f: IO[bytes]) -> Optional[str]: + """Detect musl libc location by parsing the Python executable. 
+ + Based on: https://gist.github.com/lyssdod/f51579ae8d93c8657a5564aefc2ffbca + ELF header: https://refspecs.linuxfoundation.org/elf/gabi4+/ch4.eheader.html + """ + f.seek(0) + try: + ident = _read_unpacked(f, "16B") + except struct.error: + return None + if ident[:4] != tuple(b"\x7fELF"): # Invalid magic, not ELF. + return None + f.seek(struct.calcsize("HHI"), 1) # Skip file type, machine, and version. + + try: + # e_fmt: Format for program header. + # p_fmt: Format for section header. + # p_idx: Indexes to find p_type, p_offset, and p_filesz. + e_fmt, p_fmt, p_idx = { + 1: ("IIIIHHH", "IIIIIIII", (0, 1, 4)), # 32-bit. + 2: ("QQQIHHH", "IIQQQQQQ", (0, 2, 5)), # 64-bit. + }[ident[4]] + except KeyError: + return None + else: + p_get = operator.itemgetter(*p_idx) + + # Find the interpreter section and return its content. + try: + _, e_phoff, _, _, _, e_phentsize, e_phnum = _read_unpacked(f, e_fmt) + except struct.error: + return None + for i in range(e_phnum + 1): + f.seek(e_phoff + e_phentsize * i) + try: + p_type, p_offset, p_filesz = p_get(_read_unpacked(f, p_fmt)) + except struct.error: + return None + if p_type != 3: # Not PT_INTERP. + continue + f.seek(p_offset) + interpreter = os.fsdecode(f.read(p_filesz)).strip("\0") + if "musl" not in interpreter: + return None + return interpreter + return None + + +class _MuslVersion(NamedTuple): + major: int + minor: int + + +def _parse_musl_version(output: str) -> Optional[_MuslVersion]: + lines = [n for n in (n.strip() for n in output.splitlines()) if n] + if len(lines) < 2 or lines[0][:4] != "musl": + return None + m = re.match(r"Version (\d+)\.(\d+)", lines[1]) + if not m: + return None + return _MuslVersion(major=int(m.group(1)), minor=int(m.group(2))) + + +@functools.lru_cache() +def _get_musl_version(executable: str) -> Optional[_MuslVersion]: + """Detect currently-running musl runtime version. + + This is done by checking the specified executable's dynamic linking + information, and invoking the loader to parse its output for a version + string. If the loader is musl, the output would be something like:: + + musl libc (x86_64) + Version 1.2.2 + Dynamic Program Loader + """ + with contextlib.ExitStack() as stack: + try: + f = stack.enter_context(open(executable, "rb")) + except OSError: + return None + ld = _parse_ld_musl_from_elf(f) + if not ld: + return None + proc = subprocess.run([ld], stderr=subprocess.PIPE, universal_newlines=True) + return _parse_musl_version(proc.stderr) + + +def platform_tags(arch: str) -> Iterator[str]: + """Generate musllinux tags compatible to the current platform. + + :param arch: Should be the part of platform tag after the ``linux_`` + prefix, e.g. ``x86_64``. The ``linux_`` prefix is assumed as a + prerequisite for the current platform to be musllinux-compatible. + + :returns: An iterator of compatible musllinux tags. + """ + sys_musl = _get_musl_version(sys.executable) + if sys_musl is None: # Python not dynamically linked against musl. 
+ return + for minor in range(sys_musl.minor, -1, -1): + yield f"musllinux_{sys_musl.major}_{minor}_{arch}" + + +if __name__ == "__main__": # pragma: no cover + import sysconfig + + plat = sysconfig.get_platform() + assert plat.startswith("linux-"), "not linux" + + print("plat:", plat) + print("musl:", _get_musl_version(sys.executable)) + print("tags:", end=" ") + for t in platform_tags(re.sub(r"[.-]", "_", plat.split("-", 1)[-1])): + print(t, end="\n ") diff --git a/src/poetry/core/_vendor/packaging/_structures.py b/src/poetry/core/_vendor/packaging/_structures.py new file mode 100644 index 0000000..90a6465 --- /dev/null +++ b/src/poetry/core/_vendor/packaging/_structures.py @@ -0,0 +1,61 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + + +class InfinityType: + def __repr__(self) -> str: + return "Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return False + + def __le__(self, other: object) -> bool: + return False + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return True + + def __ge__(self, other: object) -> bool: + return True + + def __neg__(self: object) -> "NegativeInfinityType": + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self) -> str: + return "-Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return True + + def __le__(self, other: object) -> bool: + return True + + def __eq__(self, other: object) -> bool: + return isinstance(other, self.__class__) + + def __gt__(self, other: object) -> bool: + return False + + def __ge__(self, other: object) -> bool: + return False + + def __neg__(self: object) -> InfinityType: + return Infinity + + +NegativeInfinity = NegativeInfinityType() diff --git a/src/poetry/core/_vendor/packaging/markers.py b/src/poetry/core/_vendor/packaging/markers.py new file mode 100644 index 0000000..cb640e8 --- /dev/null +++ b/src/poetry/core/_vendor/packaging/markers.py @@ -0,0 +1,304 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import operator +import os +import platform +import sys +from typing import Any, Callable, Dict, List, Optional, Tuple, Union + +from pyparsing import ( # noqa: N817 + Forward, + Group, + Literal as L, + ParseException, + ParseResults, + QuotedString, + ZeroOrMore, + stringEnd, + stringStart, +) + +from .specifiers import InvalidSpecifier, Specifier + +__all__ = [ + "InvalidMarker", + "UndefinedComparison", + "UndefinedEnvironmentName", + "Marker", + "default_environment", +] + +Operator = Callable[[str, str], bool] + + +class InvalidMarker(ValueError): + """ + An invalid marker was found, users should refer to PEP 508. + """ + + +class UndefinedComparison(ValueError): + """ + An invalid operation was attempted on a value that doesn't support it. + """ + + +class UndefinedEnvironmentName(ValueError): + """ + A name was attempted to be used that does not exist inside of the + environment. 
+ """ + + +class Node: + def __init__(self, value: Any) -> None: + self.value = value + + def __str__(self) -> str: + return str(self.value) + + def __repr__(self) -> str: + return f"<{self.__class__.__name__}('{self}')>" + + def serialize(self) -> str: + raise NotImplementedError + + +class Variable(Node): + def serialize(self) -> str: + return str(self) + + +class Value(Node): + def serialize(self) -> str: + return f'"{self}"' + + +class Op(Node): + def serialize(self) -> str: + return str(self) + + +VARIABLE = ( + L("implementation_version") + | L("platform_python_implementation") + | L("implementation_name") + | L("python_full_version") + | L("platform_release") + | L("platform_version") + | L("platform_machine") + | L("platform_system") + | L("python_version") + | L("sys_platform") + | L("os_name") + | L("os.name") # PEP-345 + | L("sys.platform") # PEP-345 + | L("platform.version") # PEP-345 + | L("platform.machine") # PEP-345 + | L("platform.python_implementation") # PEP-345 + | L("python_implementation") # undocumented setuptools legacy + | L("extra") # PEP-508 +) +ALIASES = { + "os.name": "os_name", + "sys.platform": "sys_platform", + "platform.version": "platform_version", + "platform.machine": "platform_machine", + "platform.python_implementation": "platform_python_implementation", + "python_implementation": "platform_python_implementation", +} +VARIABLE.setParseAction(lambda s, l, t: Variable(ALIASES.get(t[0], t[0]))) + +VERSION_CMP = ( + L("===") | L("==") | L(">=") | L("<=") | L("!=") | L("~=") | L(">") | L("<") +) + +MARKER_OP = VERSION_CMP | L("not in") | L("in") +MARKER_OP.setParseAction(lambda s, l, t: Op(t[0])) + +MARKER_VALUE = QuotedString("'") | QuotedString('"') +MARKER_VALUE.setParseAction(lambda s, l, t: Value(t[0])) + +BOOLOP = L("and") | L("or") + +MARKER_VAR = VARIABLE | MARKER_VALUE + +MARKER_ITEM = Group(MARKER_VAR + MARKER_OP + MARKER_VAR) +MARKER_ITEM.setParseAction(lambda s, l, t: tuple(t[0])) + +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() + +MARKER_EXPR = Forward() +MARKER_ATOM = MARKER_ITEM | Group(LPAREN + MARKER_EXPR + RPAREN) +MARKER_EXPR << MARKER_ATOM + ZeroOrMore(BOOLOP + MARKER_EXPR) + +MARKER = stringStart + MARKER_EXPR + stringEnd + + +def _coerce_parse_result(results: Union[ParseResults, List[Any]]) -> List[Any]: + if isinstance(results, ParseResults): + return [_coerce_parse_result(i) for i in results] + else: + return results + + +def _format_marker( + marker: Union[List[str], Tuple[Node, ...], str], first: Optional[bool] = True +) -> str: + + assert isinstance(marker, (list, tuple, str)) + + # Sometimes we have a structure like [[...]] which is a single item list + # where the single item is itself it's own list. In that case we want skip + # the rest of this function so that we don't get extraneous () on the + # outside. 
+ if ( + isinstance(marker, list) + and len(marker) == 1 + and isinstance(marker[0], (list, tuple)) + ): + return _format_marker(marker[0]) + + if isinstance(marker, list): + inner = (_format_marker(m, first=False) for m in marker) + if first: + return " ".join(inner) + else: + return "(" + " ".join(inner) + ")" + elif isinstance(marker, tuple): + return " ".join([m.serialize() for m in marker]) + else: + return marker + + +_operators: Dict[str, Operator] = { + "in": lambda lhs, rhs: lhs in rhs, + "not in": lambda lhs, rhs: lhs not in rhs, + "<": operator.lt, + "<=": operator.le, + "==": operator.eq, + "!=": operator.ne, + ">=": operator.ge, + ">": operator.gt, +} + + +def _eval_op(lhs: str, op: Op, rhs: str) -> bool: + try: + spec = Specifier("".join([op.serialize(), rhs])) + except InvalidSpecifier: + pass + else: + return spec.contains(lhs) + + oper: Optional[Operator] = _operators.get(op.serialize()) + if oper is None: + raise UndefinedComparison(f"Undefined {op!r} on {lhs!r} and {rhs!r}.") + + return oper(lhs, rhs) + + +class Undefined: + pass + + +_undefined = Undefined() + + +def _get_env(environment: Dict[str, str], name: str) -> str: + value: Union[str, Undefined] = environment.get(name, _undefined) + + if isinstance(value, Undefined): + raise UndefinedEnvironmentName( + f"{name!r} does not exist in evaluation environment." + ) + + return value + + +def _evaluate_markers(markers: List[Any], environment: Dict[str, str]) -> bool: + groups: List[List[bool]] = [[]] + + for marker in markers: + assert isinstance(marker, (list, tuple, str)) + + if isinstance(marker, list): + groups[-1].append(_evaluate_markers(marker, environment)) + elif isinstance(marker, tuple): + lhs, op, rhs = marker + + if isinstance(lhs, Variable): + lhs_value = _get_env(environment, lhs.value) + rhs_value = rhs.value + else: + lhs_value = lhs.value + rhs_value = _get_env(environment, rhs.value) + + groups[-1].append(_eval_op(lhs_value, op, rhs_value)) + else: + assert marker in ["and", "or"] + if marker == "or": + groups.append([]) + + return any(all(item) for item in groups) + + +def format_full_version(info: "sys._version_info") -> str: + version = "{0.major}.{0.minor}.{0.micro}".format(info) + kind = info.releaselevel + if kind != "final": + version += kind[0] + str(info.serial) + return version + + +def default_environment() -> Dict[str, str]: + iver = format_full_version(sys.implementation.version) + implementation_name = sys.implementation.name + return { + "implementation_name": implementation_name, + "implementation_version": iver, + "os_name": os.name, + "platform_machine": platform.machine(), + "platform_release": platform.release(), + "platform_system": platform.system(), + "platform_version": platform.version(), + "python_full_version": platform.python_version(), + "platform_python_implementation": platform.python_implementation(), + "python_version": ".".join(platform.python_version_tuple()[:2]), + "sys_platform": sys.platform, + } + + +class Marker: + def __init__(self, marker: str) -> None: + try: + self._markers = _coerce_parse_result(MARKER.parseString(marker)) + except ParseException as e: + raise InvalidMarker( + f"Invalid marker: {marker!r}, parse error at " + f"{marker[e.loc : e.loc + 8]!r}" + ) + + def __str__(self) -> str: + return _format_marker(self._markers) + + def __repr__(self) -> str: + return f"" + + def evaluate(self, environment: Optional[Dict[str, str]] = None) -> bool: + """Evaluate a marker. + + Return the boolean from evaluating the given marker against the + environment. 
environment is an optional argument to override all or + part of the determined environment. + + The environment is determined from the current Python process. + """ + current_environment = default_environment() + if environment is not None: + current_environment.update(environment) + + return _evaluate_markers(self._markers, current_environment) diff --git a/src/poetry/core/_vendor/packaging/py.typed b/src/poetry/core/_vendor/packaging/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/_vendor/packaging/requirements.py b/src/poetry/core/_vendor/packaging/requirements.py new file mode 100644 index 0000000..53f9a3a --- /dev/null +++ b/src/poetry/core/_vendor/packaging/requirements.py @@ -0,0 +1,146 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import re +import string +import urllib.parse +from typing import List, Optional as TOptional, Set + +from pyparsing import ( # noqa + Combine, + Literal as L, + Optional, + ParseException, + Regex, + Word, + ZeroOrMore, + originalTextFor, + stringEnd, + stringStart, +) + +from .markers import MARKER_EXPR, Marker +from .specifiers import LegacySpecifier, Specifier, SpecifierSet + + +class InvalidRequirement(ValueError): + """ + An invalid requirement was found, users should refer to PEP 508. + """ + + +ALPHANUM = Word(string.ascii_letters + string.digits) + +LBRACKET = L("[").suppress() +RBRACKET = L("]").suppress() +LPAREN = L("(").suppress() +RPAREN = L(")").suppress() +COMMA = L(",").suppress() +SEMICOLON = L(";").suppress() +AT = L("@").suppress() + +PUNCTUATION = Word("-_.") +IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) +IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) + +NAME = IDENTIFIER("name") +EXTRA = IDENTIFIER + +URI = Regex(r"[^ ]+")("url") +URL = AT + URI + +EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) +EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") + +VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) +VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) + +VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY +VERSION_MANY = Combine( + VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False +)("_raw_spec") +_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY) +_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "") + +VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") +VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) + +MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") +MARKER_EXPR.setParseAction( + lambda s, l, t: Marker(s[t._original_start : t._original_end]) +) +MARKER_SEPARATOR = SEMICOLON +MARKER = MARKER_SEPARATOR + MARKER_EXPR + +VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) +URL_AND_MARKER = URL + Optional(MARKER) + +NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) + +REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd +# pyparsing isn't thread safe during initialization, so we do it eagerly, see +# issue #104 +REQUIREMENT.parseString("x[]") + + +class Requirement: + """Parse a requirement. + + Parse a given requirement string into its parts, such as name, specifier, + URL, and extras. Raises InvalidRequirement on a badly-formed requirement + string. + """ + + # TODO: Can we test whether something is contained within a requirement? 
+ # If so how do we do that? Do we need to test against the _name_ of + # the thing as well as the version? What about the markers? + # TODO: Can we normalize the name and extra name? + + def __init__(self, requirement_string: str) -> None: + try: + req = REQUIREMENT.parseString(requirement_string) + except ParseException as e: + raise InvalidRequirement( + f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}' + ) + + self.name: str = req.name + if req.url: + parsed_url = urllib.parse.urlparse(req.url) + if parsed_url.scheme == "file": + if urllib.parse.urlunparse(parsed_url) != req.url: + raise InvalidRequirement("Invalid URL given") + elif not (parsed_url.scheme and parsed_url.netloc) or ( + not parsed_url.scheme and not parsed_url.netloc + ): + raise InvalidRequirement(f"Invalid URL: {req.url}") + self.url: TOptional[str] = req.url + else: + self.url = None + self.extras: Set[str] = set(req.extras.asList() if req.extras else []) + self.specifier: SpecifierSet = SpecifierSet(req.specifier) + self.marker: TOptional[Marker] = req.marker if req.marker else None + + def __str__(self) -> str: + parts: List[str] = [self.name] + + if self.extras: + formatted_extras = ",".join(sorted(self.extras)) + parts.append(f"[{formatted_extras}]") + + if self.specifier: + parts.append(str(self.specifier)) + + if self.url: + parts.append(f"@ {self.url}") + if self.marker: + parts.append(" ") + + if self.marker: + parts.append(f"; {self.marker}") + + return "".join(parts) + + def __repr__(self) -> str: + return f"" diff --git a/src/poetry/core/_vendor/packaging/specifiers.py b/src/poetry/core/_vendor/packaging/specifiers.py new file mode 100644 index 0000000..0e218a6 --- /dev/null +++ b/src/poetry/core/_vendor/packaging/specifiers.py @@ -0,0 +1,802 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import abc +import functools +import itertools +import re +import warnings +from typing import ( + Callable, + Dict, + Iterable, + Iterator, + List, + Optional, + Pattern, + Set, + Tuple, + TypeVar, + Union, +) + +from .utils import canonicalize_version +from .version import LegacyVersion, Version, parse + +ParsedVersion = Union[Version, LegacyVersion] +UnparsedVersion = Union[Version, LegacyVersion, str] +VersionTypeVar = TypeVar("VersionTypeVar", bound=UnparsedVersion) +CallableOperator = Callable[[ParsedVersion, str], bool] + + +class InvalidSpecifier(ValueError): + """ + An invalid specifier was found, users should refer to PEP 440. + """ + + +class BaseSpecifier(metaclass=abc.ABCMeta): + @abc.abstractmethod + def __str__(self) -> str: + """ + Returns the str representation of this Specifier like object. This + should be representative of the Specifier itself. + """ + + @abc.abstractmethod + def __hash__(self) -> int: + """ + Returns a hash value for this Specifier like object. + """ + + @abc.abstractmethod + def __eq__(self, other: object) -> bool: + """ + Returns a boolean representing whether or not the two Specifier like + objects are equal. + """ + + @abc.abstractproperty + def prereleases(self) -> Optional[bool]: + """ + Returns whether or not pre-releases as a whole are allowed by this + specifier. + """ + + @prereleases.setter + def prereleases(self, value: bool) -> None: + """ + Sets whether or not pre-releases as a whole are allowed by this + specifier. 
+ """ + + @abc.abstractmethod + def contains(self, item: str, prereleases: Optional[bool] = None) -> bool: + """ + Determines if the given item is contained within this specifier. + """ + + @abc.abstractmethod + def filter( + self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None + ) -> Iterable[VersionTypeVar]: + """ + Takes an iterable of items and filters them so that only items which + are contained within this specifier are allowed in it. + """ + + +class _IndividualSpecifier(BaseSpecifier): + + _operators: Dict[str, str] = {} + _regex: Pattern[str] + + def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: + match = self._regex.search(spec) + if not match: + raise InvalidSpecifier(f"Invalid specifier: '{spec}'") + + self._spec: Tuple[str, str] = ( + match.group("operator").strip(), + match.group("version").strip(), + ) + + # Store whether or not this Specifier should accept prereleases + self._prereleases = prereleases + + def __repr__(self) -> str: + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return f"<{self.__class__.__name__}({str(self)!r}{pre})>" + + def __str__(self) -> str: + return "{}{}".format(*self._spec) + + @property + def _canonical_spec(self) -> Tuple[str, str]: + return self._spec[0], canonicalize_version(self._spec[1]) + + def __hash__(self) -> int: + return hash(self._canonical_spec) + + def __eq__(self, other: object) -> bool: + if isinstance(other, str): + try: + other = self.__class__(str(other)) + except InvalidSpecifier: + return NotImplemented + elif not isinstance(other, self.__class__): + return NotImplemented + + return self._canonical_spec == other._canonical_spec + + def _get_operator(self, op: str) -> CallableOperator: + operator_callable: CallableOperator = getattr( + self, f"_compare_{self._operators[op]}" + ) + return operator_callable + + def _coerce_version(self, version: UnparsedVersion) -> ParsedVersion: + if not isinstance(version, (LegacyVersion, Version)): + version = parse(version) + return version + + @property + def operator(self) -> str: + return self._spec[0] + + @property + def version(self) -> str: + return self._spec[1] + + @property + def prereleases(self) -> Optional[bool]: + return self._prereleases + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + def __contains__(self, item: str) -> bool: + return self.contains(item) + + def contains( + self, item: UnparsedVersion, prereleases: Optional[bool] = None + ) -> bool: + + # Determine if prereleases are to be allowed or not. + if prereleases is None: + prereleases = self.prereleases + + # Normalize item to a Version or LegacyVersion, this allows us to have + # a shortcut for ``"2.0" in Specifier(">=2") + normalized_item = self._coerce_version(item) + + # Determine if we should be supporting prereleases in this specifier + # or not, if we do not support prereleases than we can short circuit + # logic if this version is a prereleases. + if normalized_item.is_prerelease and not prereleases: + return False + + # Actually do the comparison to determine if this item is contained + # within this Specifier or not. 
+ operator_callable: CallableOperator = self._get_operator(self.operator) + return operator_callable(normalized_item, self.version) + + def filter( + self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None + ) -> Iterable[VersionTypeVar]: + + yielded = False + found_prereleases = [] + + kw = {"prereleases": prereleases if prereleases is not None else True} + + # Attempt to iterate over all the values in the iterable and if any of + # them match, yield them. + for version in iterable: + parsed_version = self._coerce_version(version) + + if self.contains(parsed_version, **kw): + # If our version is a prerelease, and we were not set to allow + # prereleases, then we'll store it for later in case nothing + # else matches this specifier. + if parsed_version.is_prerelease and not ( + prereleases or self.prereleases + ): + found_prereleases.append(version) + # Either this is not a prerelease, or we should have been + # accepting prereleases from the beginning. + else: + yielded = True + yield version + + # Now that we've iterated over everything, determine if we've yielded + # any values, and if we have not and we have any prereleases stored up + # then we will go ahead and yield the prereleases. + if not yielded and found_prereleases: + for version in found_prereleases: + yield version + + +class LegacySpecifier(_IndividualSpecifier): + + _regex_str = r""" + (?P(==|!=|<=|>=|<|>)) + \s* + (?P + [^,;\s)]* # Since this is a "legacy" specifier, and the version + # string can be just about anything, we match everything + # except for whitespace, a semi-colon for marker support, + # a closing paren since versions can be enclosed in + # them, and a comma since it's a version separator. + ) + """ + + _regex = re.compile(r"^\s*" + _regex_str + r"\s*$", re.VERBOSE | re.IGNORECASE) + + _operators = { + "==": "equal", + "!=": "not_equal", + "<=": "less_than_equal", + ">=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + } + + def __init__(self, spec: str = "", prereleases: Optional[bool] = None) -> None: + super().__init__(spec, prereleases) + + warnings.warn( + "Creating a LegacyVersion has been deprecated and will be " + "removed in the next major release", + DeprecationWarning, + ) + + def _coerce_version(self, version: UnparsedVersion) -> LegacyVersion: + if not isinstance(version, LegacyVersion): + version = LegacyVersion(str(version)) + return version + + def _compare_equal(self, prospective: LegacyVersion, spec: str) -> bool: + return prospective == self._coerce_version(spec) + + def _compare_not_equal(self, prospective: LegacyVersion, spec: str) -> bool: + return prospective != self._coerce_version(spec) + + def _compare_less_than_equal(self, prospective: LegacyVersion, spec: str) -> bool: + return prospective <= self._coerce_version(spec) + + def _compare_greater_than_equal( + self, prospective: LegacyVersion, spec: str + ) -> bool: + return prospective >= self._coerce_version(spec) + + def _compare_less_than(self, prospective: LegacyVersion, spec: str) -> bool: + return prospective < self._coerce_version(spec) + + def _compare_greater_than(self, prospective: LegacyVersion, spec: str) -> bool: + return prospective > self._coerce_version(spec) + + +def _require_version_compare( + fn: Callable[["Specifier", ParsedVersion, str], bool] +) -> Callable[["Specifier", ParsedVersion, str], bool]: + @functools.wraps(fn) + def wrapped(self: "Specifier", prospective: ParsedVersion, spec: str) -> bool: + if not isinstance(prospective, Version): + return False + return 
fn(self, prospective, spec) + + return wrapped + + +class Specifier(_IndividualSpecifier): + + _regex_str = r""" + (?P(~=|==|!=|<=|>=|<|>|===)) + (?P + (?: + # The identity operators allow for an escape hatch that will + # do an exact string match of the version you wish to install. + # This will not be parsed by PEP 440 and we cannot determine + # any semantic meaning from it. This operator is discouraged + # but included entirely as an escape hatch. + (?<====) # Only match for the identity operator + \s* + [^\s]* # We just match everything, except for whitespace + # since we are only testing for strict identity. + ) + | + (?: + # The (non)equality operators allow for wild card and local + # versions to be specified so we have to define these two + # operators separately to enable that. + (?<===|!=) # Only match for equals and not equals + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)* # release + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + + # You cannot use a wild card and a dev or local version + # together so group them with a | and make them optional. + (?: + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + (?:\+[a-z0-9]+(?:[-_\.][a-z0-9]+)*)? # local + | + \.\* # Wild card syntax of .* + )? + ) + | + (?: + # The compatible operator requires at least two digits in the + # release segment. + (?<=~=) # Only match for the compatible operator + + \s* + v? + (?:[0-9]+!)? # epoch + [0-9]+(?:\.[0-9]+)+ # release (We have a + instead of a *) + (?: # pre release + [-_\.]? + (a|b|c|rc|alpha|beta|pre|preview) + [-_\.]? + [0-9]* + )? + (?: # post release + (?:-[0-9]+)|(?:[-_\.]?(post|rev|r)[-_\.]?[0-9]*) + )? + (?:[-_\.]?dev[-_\.]?[0-9]*)? # dev release + ) + | + (?: + # All other operators only allow a sub set of what the + # (non)equality operators do. Specifically they do not allow + # local versions to be specified nor do they allow the prefix + # matching wild cards. + (?=": "greater_than_equal", + "<": "less_than", + ">": "greater_than", + "===": "arbitrary", + } + + @_require_version_compare + def _compare_compatible(self, prospective: ParsedVersion, spec: str) -> bool: + + # Compatible releases have an equivalent combination of >= and ==. That + # is that ~=2.2 is equivalent to >=2.2,==2.*. This allows us to + # implement this in terms of the other specifiers instead of + # implementing it ourselves. The only thing we need to do is construct + # the other specifiers. + + # We want everything but the last item in the version, but we want to + # ignore suffix segments. + prefix = ".".join( + list(itertools.takewhile(_is_not_suffix, _version_split(spec)))[:-1] + ) + + # Add the prefix notation to the end of our string + prefix += ".*" + + return self._get_operator(">=")(prospective, spec) and self._get_operator("==")( + prospective, prefix + ) + + @_require_version_compare + def _compare_equal(self, prospective: ParsedVersion, spec: str) -> bool: + + # We need special logic to handle prefix matching + if spec.endswith(".*"): + # In the case of prefix matching we want to ignore local segment. + prospective = Version(prospective.public) + # Split the spec out by dots, and pretend that there is an implicit + # dot in between a release segment and a pre-release segment. 
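+            # Editorial note (illustrative only, not part of the vendored packaging code):
+            # the prefix comparison performed below behaves like
+            #     Specifier("==3.1.*").contains("3.1.7")   -> True
+            #     Specifier("==3.1.*").contains("3.10.0")  -> False
+            # and is the same prefix rule that _compare_compatible above reuses
+            # for the "==X.*" half of the "~=" operator.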
+ split_spec = _version_split(spec[:-2]) # Remove the trailing .* + + # Split the prospective version out by dots, and pretend that there + # is an implicit dot in between a release segment and a pre-release + # segment. + split_prospective = _version_split(str(prospective)) + + # Shorten the prospective version to be the same length as the spec + # so that we can determine if the specifier is a prefix of the + # prospective version or not. + shortened_prospective = split_prospective[: len(split_spec)] + + # Pad out our two sides with zeros so that they both equal the same + # length. + padded_spec, padded_prospective = _pad_version( + split_spec, shortened_prospective + ) + + return padded_prospective == padded_spec + else: + # Convert our spec string into a Version + spec_version = Version(spec) + + # If the specifier does not have a local segment, then we want to + # act as if the prospective version also does not have a local + # segment. + if not spec_version.local: + prospective = Version(prospective.public) + + return prospective == spec_version + + @_require_version_compare + def _compare_not_equal(self, prospective: ParsedVersion, spec: str) -> bool: + return not self._compare_equal(prospective, spec) + + @_require_version_compare + def _compare_less_than_equal(self, prospective: ParsedVersion, spec: str) -> bool: + + # NB: Local version identifiers are NOT permitted in the version + # specifier, so local version labels can be universally removed from + # the prospective version. + return Version(prospective.public) <= Version(spec) + + @_require_version_compare + def _compare_greater_than_equal( + self, prospective: ParsedVersion, spec: str + ) -> bool: + + # NB: Local version identifiers are NOT permitted in the version + # specifier, so local version labels can be universally removed from + # the prospective version. + return Version(prospective.public) >= Version(spec) + + @_require_version_compare + def _compare_less_than(self, prospective: ParsedVersion, spec_str: str) -> bool: + + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec_str) + + # Check to see if the prospective version is less than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. + if not prospective < spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a pre-release version, that we do not accept pre-release + # versions for the version mentioned in the specifier (e.g. <3.1 should + # not match 3.1.dev0, but should match 3.0.dev0). + if not spec.is_prerelease and prospective.is_prerelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # less than the spec version *and* it's not a pre-release of the same + # version in the spec. + return True + + @_require_version_compare + def _compare_greater_than(self, prospective: ParsedVersion, spec_str: str) -> bool: + + # Convert our spec to a Version instance, since we'll want to work with + # it as a version. + spec = Version(spec_str) + + # Check to see if the prospective version is greater than the spec + # version. If it's not we can short circuit and just return False now + # instead of doing extra unneeded work. 
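+        # Editorial note (illustrative only, not part of the vendored packaging code):
+        # the extra exclusions below mean, for example:
+        #     Specifier(">3.1").contains("3.2.post0")  -> True
+        #     Specifier(">3.1").contains("3.1.post0")  -> False  (post-release of 3.1)
+        #     Specifier(">3.1").contains("3.1+local")  -> False  (local version of 3.1)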
+ if not prospective > spec: + return False + + # This special case is here so that, unless the specifier itself + # includes is a post-release version, that we do not accept + # post-release versions for the version mentioned in the specifier + # (e.g. >3.1 should not match 3.0.post0, but should match 3.2.post0). + if not spec.is_postrelease and prospective.is_postrelease: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # Ensure that we do not allow a local version of the version mentioned + # in the specifier, which is technically greater than, to match. + if prospective.local is not None: + if Version(prospective.base_version) == Version(spec.base_version): + return False + + # If we've gotten to here, it means that prospective version is both + # greater than the spec version *and* it's not a pre-release of the + # same version in the spec. + return True + + def _compare_arbitrary(self, prospective: Version, spec: str) -> bool: + return str(prospective).lower() == str(spec).lower() + + @property + def prereleases(self) -> bool: + + # If there is an explicit prereleases set for this, then we'll just + # blindly use that. + if self._prereleases is not None: + return self._prereleases + + # Look at all of our specifiers and determine if they are inclusive + # operators, and if they are if they are including an explicit + # prerelease. + operator, version = self._spec + if operator in ["==", ">=", "<=", "~=", "==="]: + # The == specifier can include a trailing .*, if it does we + # want to remove before parsing. + if operator == "==" and version.endswith(".*"): + version = version[:-2] + + # Parse the version, and if it is a pre-release than this + # specifier allows pre-releases. + if parse(version).is_prerelease: + return True + + return False + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + +_prefix_regex = re.compile(r"^([0-9]+)((?:a|b|c|rc)[0-9]+)$") + + +def _version_split(version: str) -> List[str]: + result: List[str] = [] + for item in version.split("."): + match = _prefix_regex.search(item) + if match: + result.extend(match.groups()) + else: + result.append(item) + return result + + +def _is_not_suffix(segment: str) -> bool: + return not any( + segment.startswith(prefix) for prefix in ("dev", "a", "b", "rc", "post") + ) + + +def _pad_version(left: List[str], right: List[str]) -> Tuple[List[str], List[str]]: + left_split, right_split = [], [] + + # Get the release segment of our versions + left_split.append(list(itertools.takewhile(lambda x: x.isdigit(), left))) + right_split.append(list(itertools.takewhile(lambda x: x.isdigit(), right))) + + # Get the rest of our versions + left_split.append(left[len(left_split[0]) :]) + right_split.append(right[len(right_split[0]) :]) + + # Insert our padding + left_split.insert(1, ["0"] * max(0, len(right_split[0]) - len(left_split[0]))) + right_split.insert(1, ["0"] * max(0, len(left_split[0]) - len(right_split[0]))) + + return (list(itertools.chain(*left_split)), list(itertools.chain(*right_split))) + + +class SpecifierSet(BaseSpecifier): + def __init__( + self, specifiers: str = "", prereleases: Optional[bool] = None + ) -> None: + + # Split on , to break each individual specifier into it's own item, and + # strip each item to remove leading/trailing whitespace. 
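+        # Editorial note (illustrative only, not part of the vendored packaging code):
+        # a string such as ">=1.0, !=1.5, <2.0" is split below into three individual
+        # specifiers that are then ANDed together, so that
+        #     "1.4" in SpecifierSet(">=1.0, !=1.5, <2.0")  -> True
+        #     "1.5" in SpecifierSet(">=1.0, !=1.5, <2.0")  -> False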
+ split_specifiers = [s.strip() for s in specifiers.split(",") if s.strip()] + + # Parsed each individual specifier, attempting first to make it a + # Specifier and falling back to a LegacySpecifier. + parsed: Set[_IndividualSpecifier] = set() + for specifier in split_specifiers: + try: + parsed.add(Specifier(specifier)) + except InvalidSpecifier: + parsed.add(LegacySpecifier(specifier)) + + # Turn our parsed specifiers into a frozen set and save them for later. + self._specs = frozenset(parsed) + + # Store our prereleases value so we can use it later to determine if + # we accept prereleases or not. + self._prereleases = prereleases + + def __repr__(self) -> str: + pre = ( + f", prereleases={self.prereleases!r}" + if self._prereleases is not None + else "" + ) + + return f"" + + def __str__(self) -> str: + return ",".join(sorted(str(s) for s in self._specs)) + + def __hash__(self) -> int: + return hash(self._specs) + + def __and__(self, other: Union["SpecifierSet", str]) -> "SpecifierSet": + if isinstance(other, str): + other = SpecifierSet(other) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + specifier = SpecifierSet() + specifier._specs = frozenset(self._specs | other._specs) + + if self._prereleases is None and other._prereleases is not None: + specifier._prereleases = other._prereleases + elif self._prereleases is not None and other._prereleases is None: + specifier._prereleases = self._prereleases + elif self._prereleases == other._prereleases: + specifier._prereleases = self._prereleases + else: + raise ValueError( + "Cannot combine SpecifierSets with True and False prerelease " + "overrides." + ) + + return specifier + + def __eq__(self, other: object) -> bool: + if isinstance(other, (str, _IndividualSpecifier)): + other = SpecifierSet(str(other)) + elif not isinstance(other, SpecifierSet): + return NotImplemented + + return self._specs == other._specs + + def __len__(self) -> int: + return len(self._specs) + + def __iter__(self) -> Iterator[_IndividualSpecifier]: + return iter(self._specs) + + @property + def prereleases(self) -> Optional[bool]: + + # If we have been given an explicit prerelease modifier, then we'll + # pass that through here. + if self._prereleases is not None: + return self._prereleases + + # If we don't have any specifiers, and we don't have a forced value, + # then we'll just return None since we don't know if this should have + # pre-releases or not. + if not self._specs: + return None + + # Otherwise we'll see if any of the given specifiers accept + # prereleases, if any of them do we'll return True, otherwise False. + return any(s.prereleases for s in self._specs) + + @prereleases.setter + def prereleases(self, value: bool) -> None: + self._prereleases = value + + def __contains__(self, item: UnparsedVersion) -> bool: + return self.contains(item) + + def contains( + self, item: UnparsedVersion, prereleases: Optional[bool] = None + ) -> bool: + + # Ensure that our item is a Version or LegacyVersion instance. + if not isinstance(item, (LegacyVersion, Version)): + item = parse(item) + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # We can determine if we're going to allow pre-releases by looking to + # see if any of the underlying items supports them. 
If none of them do + # and this item is a pre-release then we do not allow it and we can + # short circuit that here. + # Note: This means that 1.0.dev1 would not be contained in something + # like >=1.0.devabc however it would be in >=1.0.debabc,>0.0.dev0 + if not prereleases and item.is_prerelease: + return False + + # We simply dispatch to the underlying specs here to make sure that the + # given version is contained within all of them. + # Note: This use of all() here means that an empty set of specifiers + # will always return True, this is an explicit design decision. + return all(s.contains(item, prereleases=prereleases) for s in self._specs) + + def filter( + self, iterable: Iterable[VersionTypeVar], prereleases: Optional[bool] = None + ) -> Iterable[VersionTypeVar]: + + # Determine if we're forcing a prerelease or not, if we're not forcing + # one for this particular filter call, then we'll use whatever the + # SpecifierSet thinks for whether or not we should support prereleases. + if prereleases is None: + prereleases = self.prereleases + + # If we have any specifiers, then we want to wrap our iterable in the + # filter method for each one, this will act as a logical AND amongst + # each specifier. + if self._specs: + for spec in self._specs: + iterable = spec.filter(iterable, prereleases=bool(prereleases)) + return iterable + # If we do not have any specifiers, then we need to have a rough filter + # which will filter out any pre-releases, unless there are no final + # releases, and which will filter out LegacyVersion in general. + else: + filtered: List[VersionTypeVar] = [] + found_prereleases: List[VersionTypeVar] = [] + + item: UnparsedVersion + parsed_version: Union[Version, LegacyVersion] + + for item in iterable: + # Ensure that we some kind of Version class for this item. + if not isinstance(item, (LegacyVersion, Version)): + parsed_version = parse(item) + else: + parsed_version = item + + # Filter out any item which is parsed as a LegacyVersion + if isinstance(parsed_version, LegacyVersion): + continue + + # Store any item which is a pre-release for later unless we've + # already found a final version or we are accepting prereleases + if parsed_version.is_prerelease and not prereleases: + if not filtered: + found_prereleases.append(item) + else: + filtered.append(item) + + # If we've found no items except for pre-releases, then we'll go + # ahead and use the pre-releases + if not filtered and found_prereleases and prereleases is None: + return found_prereleases + + return filtered diff --git a/src/poetry/core/_vendor/packaging/tags.py b/src/poetry/core/_vendor/packaging/tags.py new file mode 100644 index 0000000..9a3d25a --- /dev/null +++ b/src/poetry/core/_vendor/packaging/tags.py @@ -0,0 +1,487 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import logging +import platform +import sys +import sysconfig +from importlib.machinery import EXTENSION_SUFFIXES +from typing import ( + Dict, + FrozenSet, + Iterable, + Iterator, + List, + Optional, + Sequence, + Tuple, + Union, + cast, +) + +from . import _manylinux, _musllinux + +logger = logging.getLogger(__name__) + +PythonVersion = Sequence[int] +MacVersion = Tuple[int, int] + +INTERPRETER_SHORT_NAMES: Dict[str, str] = { + "python": "py", # Generic. 
+ "cpython": "cp", + "pypy": "pp", + "ironpython": "ip", + "jython": "jy", +} + + +_32_BIT_INTERPRETER = sys.maxsize <= 2 ** 32 + + +class Tag: + """ + A representation of the tag triple for a wheel. + + Instances are considered immutable and thus are hashable. Equality checking + is also supported. + """ + + __slots__ = ["_interpreter", "_abi", "_platform", "_hash"] + + def __init__(self, interpreter: str, abi: str, platform: str) -> None: + self._interpreter = interpreter.lower() + self._abi = abi.lower() + self._platform = platform.lower() + # The __hash__ of every single element in a Set[Tag] will be evaluated each time + # that a set calls its `.disjoint()` method, which may be called hundreds of + # times when scanning a page of links for packages with tags matching that + # Set[Tag]. Pre-computing the value here produces significant speedups for + # downstream consumers. + self._hash = hash((self._interpreter, self._abi, self._platform)) + + @property + def interpreter(self) -> str: + return self._interpreter + + @property + def abi(self) -> str: + return self._abi + + @property + def platform(self) -> str: + return self._platform + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Tag): + return NotImplemented + + return ( + (self._hash == other._hash) # Short-circuit ASAP for perf reasons. + and (self._platform == other._platform) + and (self._abi == other._abi) + and (self._interpreter == other._interpreter) + ) + + def __hash__(self) -> int: + return self._hash + + def __str__(self) -> str: + return f"{self._interpreter}-{self._abi}-{self._platform}" + + def __repr__(self) -> str: + return f"<{self} @ {id(self)}>" + + +def parse_tag(tag: str) -> FrozenSet[Tag]: + """ + Parses the provided tag (e.g. `py3-none-any`) into a frozenset of Tag instances. + + Returning a set is required due to the possibility that the tag is a + compressed tag set. + """ + tags = set() + interpreters, abis, platforms = tag.split("-") + for interpreter in interpreters.split("."): + for abi in abis.split("."): + for platform_ in platforms.split("."): + tags.add(Tag(interpreter, abi, platform_)) + return frozenset(tags) + + +def _get_config_var(name: str, warn: bool = False) -> Union[int, str, None]: + value = sysconfig.get_config_var(name) + if value is None and warn: + logger.debug( + "Config variable '%s' is unset, Python ABI tag may be incorrect", name + ) + return value + + +def _normalize_string(string: str) -> str: + return string.replace(".", "_").replace("-", "_") + + +def _abi3_applies(python_version: PythonVersion) -> bool: + """ + Determine if the Python version supports abi3. + + PEP 384 was first implemented in Python 3.2. + """ + return len(python_version) > 1 and tuple(python_version) >= (3, 2) + + +def _cpython_abis(py_version: PythonVersion, warn: bool = False) -> List[str]: + py_version = tuple(py_version) # To allow for version comparison. + abis = [] + version = _version_nodot(py_version[:2]) + debug = pymalloc = ucs4 = "" + with_debug = _get_config_var("Py_DEBUG", warn) + has_refcount = hasattr(sys, "gettotalrefcount") + # Windows doesn't set Py_DEBUG, so checking for support of debug-compiled + # extension modules is the best option. 
+ # https://github.com/pypa/pip/issues/3383#issuecomment-173267692 + has_ext = "_d.pyd" in EXTENSION_SUFFIXES + if with_debug or (with_debug is None and (has_refcount or has_ext)): + debug = "d" + if py_version < (3, 8): + with_pymalloc = _get_config_var("WITH_PYMALLOC", warn) + if with_pymalloc or with_pymalloc is None: + pymalloc = "m" + if py_version < (3, 3): + unicode_size = _get_config_var("Py_UNICODE_SIZE", warn) + if unicode_size == 4 or ( + unicode_size is None and sys.maxunicode == 0x10FFFF + ): + ucs4 = "u" + elif debug: + # Debug builds can also load "normal" extension modules. + # We can also assume no UCS-4 or pymalloc requirement. + abis.append(f"cp{version}") + abis.insert( + 0, + "cp{version}{debug}{pymalloc}{ucs4}".format( + version=version, debug=debug, pymalloc=pymalloc, ucs4=ucs4 + ), + ) + return abis + + +def cpython_tags( + python_version: Optional[PythonVersion] = None, + abis: Optional[Iterable[str]] = None, + platforms: Optional[Iterable[str]] = None, + *, + warn: bool = False, +) -> Iterator[Tag]: + """ + Yields the tags for a CPython interpreter. + + The tags consist of: + - cp-- + - cp-abi3- + - cp-none- + - cp-abi3- # Older Python versions down to 3.2. + + If python_version only specifies a major version then user-provided ABIs and + the 'none' ABItag will be used. + + If 'abi3' or 'none' are specified in 'abis' then they will be yielded at + their normal position and not at the beginning. + """ + if not python_version: + python_version = sys.version_info[:2] + + interpreter = f"cp{_version_nodot(python_version[:2])}" + + if abis is None: + if len(python_version) > 1: + abis = _cpython_abis(python_version, warn) + else: + abis = [] + abis = list(abis) + # 'abi3' and 'none' are explicitly handled later. + for explicit_abi in ("abi3", "none"): + try: + abis.remove(explicit_abi) + except ValueError: + pass + + platforms = list(platforms or platform_tags()) + for abi in abis: + for platform_ in platforms: + yield Tag(interpreter, abi, platform_) + if _abi3_applies(python_version): + yield from (Tag(interpreter, "abi3", platform_) for platform_ in platforms) + yield from (Tag(interpreter, "none", platform_) for platform_ in platforms) + + if _abi3_applies(python_version): + for minor_version in range(python_version[1] - 1, 1, -1): + for platform_ in platforms: + interpreter = "cp{version}".format( + version=_version_nodot((python_version[0], minor_version)) + ) + yield Tag(interpreter, "abi3", platform_) + + +def _generic_abi() -> Iterator[str]: + abi = sysconfig.get_config_var("SOABI") + if abi: + yield _normalize_string(abi) + + +def generic_tags( + interpreter: Optional[str] = None, + abis: Optional[Iterable[str]] = None, + platforms: Optional[Iterable[str]] = None, + *, + warn: bool = False, +) -> Iterator[Tag]: + """ + Yields the tags for a generic interpreter. + + The tags consist of: + - -- + + The "none" ABI will be added if it was not explicitly provided. + """ + if not interpreter: + interp_name = interpreter_name() + interp_version = interpreter_version(warn=warn) + interpreter = "".join([interp_name, interp_version]) + if abis is None: + abis = _generic_abi() + platforms = list(platforms or platform_tags()) + abis = list(abis) + if "none" not in abis: + abis.append("none") + for abi in abis: + for platform_ in platforms: + yield Tag(interpreter, abi, platform_) + + +def _py_interpreter_range(py_version: PythonVersion) -> Iterator[str]: + """ + Yields Python versions in descending order. 
+ + After the latest version, the major-only version will be yielded, and then + all previous versions of that major version. + """ + if len(py_version) > 1: + yield f"py{_version_nodot(py_version[:2])}" + yield f"py{py_version[0]}" + if len(py_version) > 1: + for minor in range(py_version[1] - 1, -1, -1): + yield f"py{_version_nodot((py_version[0], minor))}" + + +def compatible_tags( + python_version: Optional[PythonVersion] = None, + interpreter: Optional[str] = None, + platforms: Optional[Iterable[str]] = None, +) -> Iterator[Tag]: + """ + Yields the sequence of tags that are compatible with a specific version of Python. + + The tags consist of: + - py*-none- + - -none-any # ... if `interpreter` is provided. + - py*-none-any + """ + if not python_version: + python_version = sys.version_info[:2] + platforms = list(platforms or platform_tags()) + for version in _py_interpreter_range(python_version): + for platform_ in platforms: + yield Tag(version, "none", platform_) + if interpreter: + yield Tag(interpreter, "none", "any") + for version in _py_interpreter_range(python_version): + yield Tag(version, "none", "any") + + +def _mac_arch(arch: str, is_32bit: bool = _32_BIT_INTERPRETER) -> str: + if not is_32bit: + return arch + + if arch.startswith("ppc"): + return "ppc" + + return "i386" + + +def _mac_binary_formats(version: MacVersion, cpu_arch: str) -> List[str]: + formats = [cpu_arch] + if cpu_arch == "x86_64": + if version < (10, 4): + return [] + formats.extend(["intel", "fat64", "fat32"]) + + elif cpu_arch == "i386": + if version < (10, 4): + return [] + formats.extend(["intel", "fat32", "fat"]) + + elif cpu_arch == "ppc64": + # TODO: Need to care about 32-bit PPC for ppc64 through 10.2? + if version > (10, 5) or version < (10, 4): + return [] + formats.append("fat64") + + elif cpu_arch == "ppc": + if version > (10, 6): + return [] + formats.extend(["fat32", "fat"]) + + if cpu_arch in {"arm64", "x86_64"}: + formats.append("universal2") + + if cpu_arch in {"x86_64", "i386", "ppc64", "ppc", "intel"}: + formats.append("universal") + + return formats + + +def mac_platforms( + version: Optional[MacVersion] = None, arch: Optional[str] = None +) -> Iterator[str]: + """ + Yields the platform tags for a macOS system. + + The `version` parameter is a two-item tuple specifying the macOS version to + generate platform tags for. The `arch` parameter is the CPU architecture to + generate platform tags for. Both parameters default to the appropriate value + for the current system. + """ + version_str, _, cpu_arch = platform.mac_ver() + if version is None: + version = cast("MacVersion", tuple(map(int, version_str.split(".")[:2]))) + else: + version = version + if arch is None: + arch = _mac_arch(cpu_arch) + else: + arch = arch + + if (10, 0) <= version and version < (11, 0): + # Prior to Mac OS 11, each yearly release of Mac OS bumped the + # "minor" version number. The major version was always 10. + for minor_version in range(version[1], -1, -1): + compat_version = 10, minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield "macosx_{major}_{minor}_{binary_format}".format( + major=10, minor=minor_version, binary_format=binary_format + ) + + if version >= (11, 0): + # Starting with Mac OS 11, each yearly release bumps the major version + # number. The minor versions are now the midyear updates. 
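+        # Editorial note (illustrative only, not part of the vendored packaging code):
+        # on macOS 12 with an arm64 interpreter this loop would yield
+        #     macosx_12_0_arm64, macosx_12_0_universal2,
+        #     macosx_11_0_arm64, macosx_11_0_universal2
+        # before the 10.x compatibility tags are generated further down.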
+ for major_version in range(version[0], 10, -1): + compat_version = major_version, 0 + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield "macosx_{major}_{minor}_{binary_format}".format( + major=major_version, minor=0, binary_format=binary_format + ) + + if version >= (11, 0): + # Mac OS 11 on x86_64 is compatible with binaries from previous releases. + # Arm64 support was introduced in 11.0, so no Arm binaries from previous + # releases exist. + # + # However, the "universal2" binary format can have a + # macOS version earlier than 11.0 when the x86_64 part of the binary supports + # that version of macOS. + if arch == "x86_64": + for minor_version in range(16, 3, -1): + compat_version = 10, minor_version + binary_formats = _mac_binary_formats(compat_version, arch) + for binary_format in binary_formats: + yield "macosx_{major}_{minor}_{binary_format}".format( + major=compat_version[0], + minor=compat_version[1], + binary_format=binary_format, + ) + else: + for minor_version in range(16, 3, -1): + compat_version = 10, minor_version + binary_format = "universal2" + yield "macosx_{major}_{minor}_{binary_format}".format( + major=compat_version[0], + minor=compat_version[1], + binary_format=binary_format, + ) + + +def _linux_platforms(is_32bit: bool = _32_BIT_INTERPRETER) -> Iterator[str]: + linux = _normalize_string(sysconfig.get_platform()) + if is_32bit: + if linux == "linux_x86_64": + linux = "linux_i686" + elif linux == "linux_aarch64": + linux = "linux_armv7l" + _, arch = linux.split("_", 1) + yield from _manylinux.platform_tags(linux, arch) + yield from _musllinux.platform_tags(arch) + yield linux + + +def _generic_platforms() -> Iterator[str]: + yield _normalize_string(sysconfig.get_platform()) + + +def platform_tags() -> Iterator[str]: + """ + Provides the platform tags for this installation. + """ + if platform.system() == "Darwin": + return mac_platforms() + elif platform.system() == "Linux": + return _linux_platforms() + else: + return _generic_platforms() + + +def interpreter_name() -> str: + """ + Returns the name of the running interpreter. + """ + name = sys.implementation.name + return INTERPRETER_SHORT_NAMES.get(name) or name + + +def interpreter_version(*, warn: bool = False) -> str: + """ + Returns the version of the running interpreter. + """ + version = _get_config_var("py_version_nodot", warn=warn) + if version: + version = str(version) + else: + version = _version_nodot(sys.version_info[:2]) + return version + + +def _version_nodot(version: PythonVersion) -> str: + return "".join(map(str, version)) + + +def sys_tags(*, warn: bool = False) -> Iterator[Tag]: + """ + Returns the sequence of tag triples for the running interpreter. + + The order of the sequence corresponds to priority order for the + interpreter, from most to least important. + """ + + interp_name = interpreter_name() + if interp_name == "cp": + yield from cpython_tags(warn=warn) + else: + yield from generic_tags() + + if interp_name == "pp": + yield from compatible_tags(interpreter="pp3") + else: + yield from compatible_tags() diff --git a/src/poetry/core/_vendor/packaging/utils.py b/src/poetry/core/_vendor/packaging/utils.py new file mode 100644 index 0000000..bab11b8 --- /dev/null +++ b/src/poetry/core/_vendor/packaging/utils.py @@ -0,0 +1,136 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. 
+ +import re +from typing import FrozenSet, NewType, Tuple, Union, cast + +from .tags import Tag, parse_tag +from .version import InvalidVersion, Version + +BuildTag = Union[Tuple[()], Tuple[int, str]] +NormalizedName = NewType("NormalizedName", str) + + +class InvalidWheelFilename(ValueError): + """ + An invalid wheel filename was found, users should refer to PEP 427. + """ + + +class InvalidSdistFilename(ValueError): + """ + An invalid sdist filename was found, users should refer to the packaging user guide. + """ + + +_canonicalize_regex = re.compile(r"[-_.]+") +# PEP 427: The build number must start with a digit. +_build_tag_regex = re.compile(r"(\d+)(.*)") + + +def canonicalize_name(name: str) -> NormalizedName: + # This is taken from PEP 503. + value = _canonicalize_regex.sub("-", name).lower() + return cast(NormalizedName, value) + + +def canonicalize_version(version: Union[Version, str]) -> str: + """ + This is very similar to Version.__str__, but has one subtle difference + with the way it handles the release segment. + """ + if isinstance(version, str): + try: + parsed = Version(version) + except InvalidVersion: + # Legacy versions cannot be normalized + return version + else: + parsed = version + + parts = [] + + # Epoch + if parsed.epoch != 0: + parts.append(f"{parsed.epoch}!") + + # Release segment + # NB: This strips trailing '.0's to normalize + parts.append(re.sub(r"(\.0)+$", "", ".".join(str(x) for x in parsed.release))) + + # Pre-release + if parsed.pre is not None: + parts.append("".join(str(x) for x in parsed.pre)) + + # Post-release + if parsed.post is not None: + parts.append(f".post{parsed.post}") + + # Development release + if parsed.dev is not None: + parts.append(f".dev{parsed.dev}") + + # Local version segment + if parsed.local is not None: + parts.append(f"+{parsed.local}") + + return "".join(parts) + + +def parse_wheel_filename( + filename: str, +) -> Tuple[NormalizedName, Version, BuildTag, FrozenSet[Tag]]: + if not filename.endswith(".whl"): + raise InvalidWheelFilename( + f"Invalid wheel filename (extension must be '.whl'): {filename}" + ) + + filename = filename[:-4] + dashes = filename.count("-") + if dashes not in (4, 5): + raise InvalidWheelFilename( + f"Invalid wheel filename (wrong number of parts): {filename}" + ) + + parts = filename.split("-", dashes - 2) + name_part = parts[0] + # See PEP 427 for the rules on escaping the project name + if "__" in name_part or re.match(r"^[\w\d._]*$", name_part, re.UNICODE) is None: + raise InvalidWheelFilename(f"Invalid project name: {filename}") + name = canonicalize_name(name_part) + version = Version(parts[1]) + if dashes == 5: + build_part = parts[2] + build_match = _build_tag_regex.match(build_part) + if build_match is None: + raise InvalidWheelFilename( + f"Invalid build number: {build_part} in '{filename}'" + ) + build = cast(BuildTag, (int(build_match.group(1)), build_match.group(2))) + else: + build = () + tags = parse_tag(parts[-1]) + return (name, version, build, tags) + + +def parse_sdist_filename(filename: str) -> Tuple[NormalizedName, Version]: + if filename.endswith(".tar.gz"): + file_stem = filename[: -len(".tar.gz")] + elif filename.endswith(".zip"): + file_stem = filename[: -len(".zip")] + else: + raise InvalidSdistFilename( + f"Invalid sdist filename (extension must be '.tar.gz' or '.zip'):" + f" {filename}" + ) + + # We are requiring a PEP 440 version, which cannot contain dashes, + # so we split on the last dash. 
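+    # Editorial note (illustrative only, not part of the vendored packaging code):
+    #     parse_wheel_filename("foo_bar-1.0-py3-none-any.whl")
+    #         -> ("foo-bar", Version("1.0"), (), frozenset({Tag("py3", "none", "any")}))
+    #     parse_sdist_filename("foo_bar-1.0.tar.gz")
+    #         -> ("foo-bar", Version("1.0"))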
+ name_part, sep, version_part = file_stem.rpartition("-") + if not sep: + raise InvalidSdistFilename(f"Invalid sdist filename: {filename}") + + name = canonicalize_name(name_part) + version = Version(version_part) + return (name, version) diff --git a/src/poetry/core/_vendor/packaging/version.py b/src/poetry/core/_vendor/packaging/version.py new file mode 100644 index 0000000..de9a09a --- /dev/null +++ b/src/poetry/core/_vendor/packaging/version.py @@ -0,0 +1,504 @@ +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. See the LICENSE file in the root of this repository +# for complete details. + +import collections +import itertools +import re +import warnings +from typing import Callable, Iterator, List, Optional, SupportsInt, Tuple, Union + +from ._structures import Infinity, InfinityType, NegativeInfinity, NegativeInfinityType + +__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] + +InfiniteTypes = Union[InfinityType, NegativeInfinityType] +PrePostDevType = Union[InfiniteTypes, Tuple[str, int]] +SubLocalType = Union[InfiniteTypes, int, str] +LocalType = Union[ + NegativeInfinityType, + Tuple[ + Union[ + SubLocalType, + Tuple[SubLocalType, str], + Tuple[NegativeInfinityType, SubLocalType], + ], + ..., + ], +] +CmpKey = Tuple[ + int, Tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType +] +LegacyCmpKey = Tuple[int, Tuple[str, ...]] +VersionComparisonMethod = Callable[ + [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool +] + +_Version = collections.namedtuple( + "_Version", ["epoch", "release", "dev", "pre", "post", "local"] +) + + +def parse(version: str) -> Union["LegacyVersion", "Version"]: + """ + Parse the given version string and return either a :class:`Version` object + or a :class:`LegacyVersion` object depending on if the given version is + a valid PEP 440 version or a legacy version. + """ + try: + return Version(version) + except InvalidVersion: + return LegacyVersion(version) + + +class InvalidVersion(ValueError): + """ + An invalid version was found, users should refer to PEP 440. + """ + + +class _BaseVersion: + _key: Union[CmpKey, LegacyCmpKey] + + def __hash__(self) -> int: + return hash(self._key) + + # Please keep the duplicated `isinstance` check + # in the six comparisons hereunder + # unless you find a way to avoid adding overhead function calls. 
+ def __lt__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key < other._key + + def __le__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key <= other._key + + def __eq__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key == other._key + + def __ge__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key >= other._key + + def __gt__(self, other: "_BaseVersion") -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key > other._key + + def __ne__(self, other: object) -> bool: + if not isinstance(other, _BaseVersion): + return NotImplemented + + return self._key != other._key + + +class LegacyVersion(_BaseVersion): + def __init__(self, version: str) -> None: + self._version = str(version) + self._key = _legacy_cmpkey(self._version) + + warnings.warn( + "Creating a LegacyVersion has been deprecated and will be " + "removed in the next major release", + DeprecationWarning, + ) + + def __str__(self) -> str: + return self._version + + def __repr__(self) -> str: + return f"" + + @property + def public(self) -> str: + return self._version + + @property + def base_version(self) -> str: + return self._version + + @property + def epoch(self) -> int: + return -1 + + @property + def release(self) -> None: + return None + + @property + def pre(self) -> None: + return None + + @property + def post(self) -> None: + return None + + @property + def dev(self) -> None: + return None + + @property + def local(self) -> None: + return None + + @property + def is_prerelease(self) -> bool: + return False + + @property + def is_postrelease(self) -> bool: + return False + + @property + def is_devrelease(self) -> bool: + return False + + +_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE) + +_legacy_version_replacement_map = { + "pre": "c", + "preview": "c", + "-": "final-", + "rc": "c", + "dev": "@", +} + + +def _parse_version_parts(s: str) -> Iterator[str]: + for part in _legacy_version_component_re.split(s): + part = _legacy_version_replacement_map.get(part, part) + + if not part or part == ".": + continue + + if part[:1] in "0123456789": + # pad for numeric comparison + yield part.zfill(8) + else: + yield "*" + part + + # ensure that alpha/beta/candidate are before final + yield "*final" + + +def _legacy_cmpkey(version: str) -> LegacyCmpKey: + + # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch + # greater than or equal to 0. This will effectively put the LegacyVersion, + # which uses the defacto standard originally implemented by setuptools, + # as before all PEP 440 versions. + epoch = -1 + + # This scheme is taken from pkg_resources.parse_version setuptools prior to + # it's adoption of the packaging library. + parts: List[str] = [] + for part in _parse_version_parts(version.lower()): + if part.startswith("*"): + # remove "-" before a prerelease tag + if part < "*final": + while parts and parts[-1] == "*final-": + parts.pop() + + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == "00000000": + parts.pop() + + parts.append(part) + + return epoch, tuple(parts) + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +VERSION_PATTERN = r""" + v? 
+    (?:
+        (?:(?P<epoch>[0-9]+)!)?                           # epoch
+        (?P<release>[0-9]+(?:\.[0-9]+)*)                  # release segment
+        (?P<pre>                                          # pre-release
+            [-_\.]?
+            (?P<pre_l>(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P<pre_n>[0-9]+)?
+        )?
+        (?P<post>                                         # post release
+            (?:-(?P<post_n1>[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?P<post_l>post|rev|r)
+                [-_\.]?
+                (?P<post_n2>[0-9]+)?
+            )
+        )?
+        (?P<dev>                                          # dev release
+            [-_\.]?
+            (?P<dev_l>dev)
+            [-_\.]?
+            (?P<dev_n>[0-9]+)?
+        )?
+    )
+    (?:\+(?P<local>[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
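+# Editor's note -- an illustrative sketch, not part of the upstream packaging
+# sources: how VERSION_PATTERN's named groups decompose a fully-featured
+# version string such as "2!1.2.3rc1.post2.dev3+ubuntu.1":
+#   epoch="2", release="1.2.3", pre_l="rc", pre_n="1",
+#   post_l="post", post_n2="2", dev_l="dev", dev_n="3", local="ubuntu.1"
+# (post_n1 is only populated for the short "1.0-1" style of post release.)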
+
+class Version(_BaseVersion):
+
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    def __init__(self, version: str) -> None:
+
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        return f"<Version('{self}')>"
+
+    def __str__(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join(str(x) for x in self.pre))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self) -> int:
+        _epoch: int = self._version.epoch
+        return _epoch
+
+    @property
+    def release(self) -> Tuple[int, ...]:
+        _release: Tuple[int, ...] = self._version.release
+        return _release
+
+    @property
+    def pre(self) -> Optional[Tuple[str, int]]:
+        _pre: Optional[Tuple[str, int]] = self._version.pre
+        return _pre
+
+    @property
+    def post(self) -> Optional[int]:
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> Optional[int]:
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> Optional[str]:
+        if self._version.local:
+            return ".".join(str(x) for x in self._version.local)
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join(str(x) for x in self.release))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        return self.release[2] if len(self.release) >= 3 else 0
+
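+# Editor's note -- an illustrative, doctest-style sketch (not part of the upstream
+# packaging sources) of how the Version class above is typically used:
+#
+#   >>> v = Version("2!1.2.3rc1.post2.dev3+ubuntu.1")
+#   >>> v.epoch, v.release, v.pre, v.post, v.dev, v.local
+#   (2, (1, 2, 3), ('rc', 1), 2, 3, 'ubuntu.1')
+#   >>> v.public, v.base_version, v.is_prerelease
+#   ('2!1.2.3rc1.post2.dev3', '2!1.2.3', True)
+#   >>> Version("1.0") < Version("1.0.post1") < Version("1.1.dev0")
+#   True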
+
+def _parse_letter_version(
+    letter: str, number: Union[str, bytes, SupportsInt]
+) -> Optional[Tuple[str, int]]:
+
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
+
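+# Editor's note -- illustrative input/output pairs (not part of the upstream
+# packaging sources) for the normalization performed by _parse_letter_version:
+#   ("alpha", None) -> ("a", 0)      # spelled-out pre-release, implicit number 0
+#   ("RC", "2")     -> ("rc", 2)     # case-folded; "c"/"pre"/"preview" also map to "rc"
+#   ("rev", "5")    -> ("post", 5)   # "rev"/"r" are alternate spellings of "post"
+#   (None, "1")     -> ("post", 1)   # bare number means an implicit post release ("1.0-1")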
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str) -> Optional[LocalType]:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
+
+
+def _cmpkey(
+    epoch: int,
+    release: Tuple[int, ...],
+    pre: Optional[Tuple[str, int]],
+    post: Optional[Tuple[str, int]],
+    dev: Optional[Tuple[str, int]],
+    local: Optional[Tuple[SubLocalType]],
+) -> CmpKey:
+
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now-leading
+    # zeros until we come to something non-zero, then re-reverse the rest back into
+    # the correct order, make it a tuple, and use that as our sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: PrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: PrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: PrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: LocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
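+
+# Editor's note -- an illustrative summary, not part of the upstream packaging
+# sources: the sentinel values chosen in _cmpkey yield the PEP 440 ordering
+#   1.0.dev0 < 1.0a1 < 1.0rc1 < 1.0 < 1.0.post1 < 1.1.dev0
+# and, because trailing zeros are stripped from the release tuple,
+#   Version("1.0") == Version("1.0.0")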
diff --git a/src/poetry/core/_vendor/pyparsing/LICENSE b/src/poetry/core/_vendor/pyparsing/LICENSE
new file mode 100644
index 0000000..1bf9852
--- /dev/null
+++ b/src/poetry/core/_vendor/pyparsing/LICENSE
@@ -0,0 +1,18 @@
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/src/poetry/core/_vendor/pyparsing/__init__.py b/src/poetry/core/_vendor/pyparsing/__init__.py
new file mode 100644
index 0000000..7802ff1
--- /dev/null
+++ b/src/poetry/core/_vendor/pyparsing/__init__.py
@@ -0,0 +1,331 @@
+# module pyparsing.py
+#
+# Copyright (c) 2003-2022  Paul T. McGuire
+#
+# Permission is hereby granted, free of charge, to any person obtaining
+# a copy of this software and associated documentation files (the
+# "Software"), to deal in the Software without restriction, including
+# without limitation the rights to use, copy, modify, merge, publish,
+# distribute, sublicense, and/or sell copies of the Software, and to
+# permit persons to whom the Software is furnished to do so, subject to
+# the following conditions:
+#
+# The above copyright notice and this permission notice shall be
+# included in all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+#
+
+__doc__ = """
+pyparsing module - Classes and methods to define and execute parsing grammars
+=============================================================================
+
+The pyparsing module is an alternative approach to creating and
+executing simple grammars, vs. the traditional lex/yacc approach, or the
+use of regular expressions.  With pyparsing, you don't need to learn
+a new syntax for defining grammars or matching expressions - the parsing
+module provides a library of classes that you use to construct the
+grammar directly in Python.
+
+Here is a program to parse "Hello, World!" (or any greeting of the form
+``"<salutation>, <addressee>!"``), built up using :class:`Word`,
+:class:`Literal`, and :class:`And` elements
+(the :meth:`'+'` operators create :class:`And` expressions,
+and the strings are auto-converted to :class:`Literal` expressions)::
+
+    from pyparsing import Word, alphas
+
+    # define grammar of a greeting
+    greet = Word(alphas) + "," + Word(alphas) + "!"
+
+    hello = "Hello, World!"
+    print(hello, "->", greet.parse_string(hello))
+
+The program outputs the following::
+
+    Hello, World! -> ['Hello', ',', 'World', '!']
+
+The Python representation of the grammar is quite readable, owing to the
+self-explanatory class names, and the use of :class:`'+'`,
+:class:`'|'`, :class:`'^'` and :class:`'&'` operators.
+
+The :class:`ParseResults` object returned from
+:class:`ParserElement.parseString` can be
+accessed as a nested list, a dictionary, or an object with named
+attributes.
+
+The pyparsing module handles some of the problems that are typically
+vexing when writing text parsers:
+
+  - extra or missing whitespace (the above program will also handle
+    "Hello,World!", "Hello  ,  World  !", etc.)
+  - quoted strings
+  - embedded comments
+
+
+Getting Started -
+-----------------
+Visit the classes :class:`ParserElement` and :class:`ParseResults` to
+see the base classes that most other pyparsing
+classes inherit from. Use the docstrings for examples of how to:
+
+ - construct literal match expressions from :class:`Literal` and
+   :class:`CaselessLiteral` classes
+ - construct character word-group expressions using the :class:`Word`
+   class
+ - see how to create repetitive expressions using :class:`ZeroOrMore`
+   and :class:`OneOrMore` classes
+ - use :class:`'+'`, :class:`'|'`, :class:`'^'`,
+   and :class:`'&'` operators to combine simple expressions into
+   more complex ones
+ - associate names with your parsed results using
+   :class:`ParserElement.setResultsName`
+ - access the parsed data, which is returned as a :class:`ParseResults`
+   object
+ - find some helpful expression short-cuts like :class:`delimitedList`
+   and :class:`oneOf`
+ - find more useful common expressions in the :class:`pyparsing_common`
+   namespace class
+"""
+from typing import NamedTuple
+
+
+class version_info(NamedTuple):
+    major: int
+    minor: int
+    micro: int
+    releaselevel: str
+    serial: int
+
+    @property
+    def __version__(self):
+        return (
+            "{}.{}.{}".format(self.major, self.minor, self.micro)
+            + (
+                "{}{}{}".format(
+                    "r" if self.releaselevel[0] == "c" else "",
+                    self.releaselevel[0],
+                    self.serial,
+                ),
+                "",
+            )[self.releaselevel == "final"]
+        )
+
+    def __str__(self):
+        return "{} {} / {}".format(__name__, self.__version__, __version_time__)
+
+    def __repr__(self):
+        return "{}.{}({})".format(
+            __name__,
+            type(self).__name__,
+            ", ".join("{}={!r}".format(*nv) for nv in zip(self._fields, self)),
+        )
+
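+# Editor's note -- illustrative examples (not part of the upstream pyparsing
+# sources) of how the __version__ property above renders a version_info tuple:
+#   version_info(3, 0, 9, "final", 0).__version__      -> "3.0.9"
+#   version_info(3, 1, 0, "candidate", 2).__version__  -> "3.1.0rc2"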
+
+__version_info__ = version_info(3, 0, 9, "final", 0)
+__version_time__ = "05 May 2022 07:02 UTC"
+__version__ = __version_info__.__version__
+__versionTime__ = __version_time__
+__author__ = "Paul McGuire "
+
+from .util import *
+from .exceptions import *
+from .actions import *
+from .core import __diag__, __compat__
+from .results import *
+from .core import *
+from .core import _builtin_exprs as core_builtin_exprs
+from .helpers import *
+from .helpers import _builtin_exprs as helper_builtin_exprs
+
+from .unicode import unicode_set, UnicodeRangeList, pyparsing_unicode as unicode
+from .testing import pyparsing_test as testing
+from .common import (
+    pyparsing_common as common,
+    _builtin_exprs as common_builtin_exprs,
+)
+
+# define backward compat synonyms
+if "pyparsing_unicode" not in globals():
+    pyparsing_unicode = unicode
+if "pyparsing_common" not in globals():
+    pyparsing_common = common
+if "pyparsing_test" not in globals():
+    pyparsing_test = testing
+
+core_builtin_exprs += common_builtin_exprs + helper_builtin_exprs
+
+
+__all__ = [
+    "__version__",
+    "__version_time__",
+    "__author__",
+    "__compat__",
+    "__diag__",
+    "And",
+    "AtLineStart",
+    "AtStringStart",
+    "CaselessKeyword",
+    "CaselessLiteral",
+    "CharsNotIn",
+    "Combine",
+    "Dict",
+    "Each",
+    "Empty",
+    "FollowedBy",
+    "Forward",
+    "GoToColumn",
+    "Group",
+    "IndentedBlock",
+    "Keyword",
+    "LineEnd",
+    "LineStart",
+    "Literal",
+    "Located",
+    "PrecededBy",
+    "MatchFirst",
+    "NoMatch",
+    "NotAny",
+    "OneOrMore",
+    "OnlyOnce",
+    "OpAssoc",
+    "Opt",
+    "Optional",
+    "Or",
+    "ParseBaseException",
+    "ParseElementEnhance",
+    "ParseException",
+    "ParseExpression",
+    "ParseFatalException",
+    "ParseResults",
+    "ParseSyntaxException",
+    "ParserElement",
+    "PositionToken",
+    "QuotedString",
+    "RecursiveGrammarException",
+    "Regex",
+    "SkipTo",
+    "StringEnd",
+    "StringStart",
+    "Suppress",
+    "Token",
+    "TokenConverter",
+    "White",
+    "Word",
+    "WordEnd",
+    "WordStart",
+    "ZeroOrMore",
+    "Char",
+    "alphanums",
+    "alphas",
+    "alphas8bit",
+    "any_close_tag",
+    "any_open_tag",
+    "c_style_comment",
+    "col",
+    "common_html_entity",
+    "counted_array",
+    "cpp_style_comment",
+    "dbl_quoted_string",
+    "dbl_slash_comment",
+    "delimited_list",
+    "dict_of",
+    "empty",
+    "hexnums",
+    "html_comment",
+    "identchars",
+    "identbodychars",
+    "java_style_comment",
+    "line",
+    "line_end",
+    "line_start",
+    "lineno",
+    "make_html_tags",
+    "make_xml_tags",
+    "match_only_at_col",
+    "match_previous_expr",
+    "match_previous_literal",
+    "nested_expr",
+    "null_debug_action",
+    "nums",
+    "one_of",
+    "printables",
+    "punc8bit",
+    "python_style_comment",
+    "quoted_string",
+    "remove_quotes",
+    "replace_with",
+    "replace_html_entity",
+    "rest_of_line",
+    "sgl_quoted_string",
+    "srange",
+    "string_end",
+    "string_start",
+    "trace_parse_action",
+    "unicode_string",
+    "with_attribute",
+    "indentedBlock",
+    "original_text_for",
+    "ungroup",
+    "infix_notation",
+    "locatedExpr",
+    "with_class",
+    "CloseMatch",
+    "token_map",
+    "pyparsing_common",
+    "pyparsing_unicode",
+    "unicode_set",
+    "condition_as_parse_action",
+    "pyparsing_test",
+    # pre-PEP8 compatibility names
+    "__versionTime__",
+    "anyCloseTag",
+    "anyOpenTag",
+    "cStyleComment",
+    "commonHTMLEntity",
+    "countedArray",
+    "cppStyleComment",
+    "dblQuotedString",
+    "dblSlashComment",
+    "delimitedList",
+    "dictOf",
+    "htmlComment",
+    "javaStyleComment",
+    "lineEnd",
+    "lineStart",
+    "makeHTMLTags",
+    "makeXMLTags",
+    "matchOnlyAtCol",
+    "matchPreviousExpr",
+    "matchPreviousLiteral",
+    "nestedExpr",
+    "nullDebugAction",
+    "oneOf",
+    "opAssoc",
+    "pythonStyleComment",
+    "quotedString",
+    "removeQuotes",
+    "replaceHTMLEntity",
+    "replaceWith",
+    "restOfLine",
+    "sglQuotedString",
+    "stringEnd",
+    "stringStart",
+    "traceParseAction",
+    "unicodeString",
+    "withAttribute",
+    "indentedBlock",
+    "originalTextFor",
+    "infixNotation",
+    "locatedExpr",
+    "withClass",
+    "tokenMap",
+    "conditionAsParseAction",
+    "autoname_elements",
+]
diff --git a/src/poetry/core/_vendor/pyparsing/actions.py b/src/poetry/core/_vendor/pyparsing/actions.py
new file mode 100644
index 0000000..f72c66e
--- /dev/null
+++ b/src/poetry/core/_vendor/pyparsing/actions.py
@@ -0,0 +1,207 @@
+# actions.py
+
+from .exceptions import ParseException
+from .util import col
+
+
+class OnlyOnce:
+    """
+    Wrapper for parse actions, to ensure they are only called once.
+    """
+
+    def __init__(self, method_call):
+        from .core import _trim_arity
+
+        self.callable = _trim_arity(method_call)
+        self.called = False
+
+    def __call__(self, s, l, t):
+        if not self.called:
+            results = self.callable(s, l, t)
+            self.called = True
+            return results
+        raise ParseException(s, l, "OnlyOnce obj called multiple times w/out reset")
+
+    def reset(self):
+        """
+        Allow the associated parse action to be called once more.
+        """
+
+        self.called = False
+
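+# Editor's note -- an illustrative sketch, not part of the upstream pyparsing
+# sources, of typical OnlyOnce usage:
+#   first_only = OnlyOnce(lambda toks: print(toks))
+#   expr = Word(nums).set_parse_action(first_only)
+#   expr.parse_string("123")   # parse action runs once
+#   expr.parse_string("456")   # fails with ParseException until first_only.reset()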
+
+def match_only_at_col(n):
+    """
+    Helper method for defining parse actions that require matching at
+    a specific column in the input text.
+    """
+
+    def verify_col(strg, locn, toks):
+        if col(locn, strg) != n:
+            raise ParseException(strg, locn, "matched token not at column {}".format(n))
+
+    return verify_col
+
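+# Editor's note -- an illustrative sketch, not part of the upstream pyparsing
+# sources: match_only_at_col builds a parse action, e.g. to accept a word only
+# when it starts in column 1:
+#   heading = Word(alphas).add_parse_action(match_only_at_col(1))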
+
+def replace_with(repl_str):
+    """
+    Helper method for common parse actions that simply return
+    a literal value.  Especially useful when used with
+    :class:`transform_string` ().
+
+    Example::
+
+        num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
+        na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
+        term = na | num
+
+        term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
+    """
+    return lambda s, l, t: [repl_str]
+
+
+def remove_quotes(s, l, t):
+    """
+    Helper parse action for removing quotation marks from parsed
+    quoted strings.
+
+    Example::
+
+        # by default, quotation marks are included in parsed results
+        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
+
+        # use remove_quotes to strip quotation marks from parsed results
+        quoted_string.set_parse_action(remove_quotes)
+        quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
+    """
+    return t[0][1:-1]
+
+
+def with_attribute(*args, **attr_dict):
+    """
+    Helper to create a validating parse action to be used with start
+    tags created with :class:`make_xml_tags` or
+    :class:`make_html_tags`. Use ``with_attribute`` to qualify
+    a starting tag with a required attribute value, to avoid false
+    matches on common tags such as ```` or ``
``. + + Call ``with_attribute`` with a series of attribute names and + values. Specify the list of filter attributes names and values as: + + - keyword arguments, as in ``(align="right")``, or + - as an explicit dict with ``**`` operator, when an attribute + name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}`` + - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))`` + + For attribute names with a namespace prefix, you must use the second + form. Attribute names are matched insensitive to upper/lower case. + + If just testing for ``class`` (with or without a namespace), use + :class:`with_class`. + + To verify that the attribute exists, but without specifying a value, + pass ``with_attribute.ANY_VALUE`` as the value. + + Example:: + + html = ''' +
+            <div>
+            Some text
+            <div type="grid">1 4 0 1 0 </div>
+            <div type="graph">1,3 2,3 1,1</div>
+            <div>this has no type</div>
+            </div>
+ + ''' + div,div_end = make_html_tags("div") + + # only match div tag having a type attribute with value "grid" + div_grid = div().set_parse_action(with_attribute(type="grid")) + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.search_string(html): + print(grid_header.body) + + # construct a match with any div tag having a type attribute, regardless of the value + div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.search_string(html): + print(div_header.body) + + prints:: + + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + if args: + attrs = args[:] + else: + attrs = attr_dict.items() + attrs = [(k, v) for k, v in attrs] + + def pa(s, l, tokens): + for attrName, attrValue in attrs: + if attrName not in tokens: + raise ParseException(s, l, "no matching attribute " + attrName) + if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue: + raise ParseException( + s, + l, + "attribute {!r} has value {!r}, must be {!r}".format( + attrName, tokens[attrName], attrValue + ), + ) + + return pa + + +with_attribute.ANY_VALUE = object() + + +def with_class(classname, namespace=""): + """ + Simplified version of :class:`with_attribute` when + matching on a div class - made difficult because ``class`` is + a reserved word in Python. + + Example:: + + html = ''' +
+            <div>
+            Some text
+            <div class="grid">1 4 0 1 0 </div>
+            <div class="graph">1,3 2,3 1,1</div>
+            <div>this &lt;div&gt; has no class</div>
+            </div>
+ + ''' + div,div_end = make_html_tags("div") + div_grid = div().set_parse_action(with_class("grid")) + + grid_expr = div_grid + SkipTo(div | div_end)("body") + for grid_header in grid_expr.search_string(html): + print(grid_header.body) + + div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE)) + div_expr = div_any_type + SkipTo(div | div_end)("body") + for div_header in div_expr.search_string(html): + print(div_header.body) + + prints:: + + 1 4 0 1 0 + + 1 4 0 1 0 + 1,3 2,3 1,1 + """ + classattr = "{}:class".format(namespace) if namespace else "class" + return with_attribute(**{classattr: classname}) + + +# pre-PEP8 compatibility symbols +replaceWith = replace_with +removeQuotes = remove_quotes +withAttribute = with_attribute +withClass = with_class +matchOnlyAtCol = match_only_at_col diff --git a/src/poetry/core/_vendor/pyparsing/common.py b/src/poetry/core/_vendor/pyparsing/common.py new file mode 100644 index 0000000..1859fb7 --- /dev/null +++ b/src/poetry/core/_vendor/pyparsing/common.py @@ -0,0 +1,424 @@ +# common.py +from .core import * +from .helpers import delimited_list, any_open_tag, any_close_tag +from datetime import datetime + + +# some other useful expressions - using lower-case class name since we are really using this as a namespace +class pyparsing_common: + """Here are some common low-level expressions that may be useful in + jump-starting parser development: + + - numeric forms (:class:`integers`, :class:`reals`, + :class:`scientific notation`) + - common :class:`programming identifiers` + - network addresses (:class:`MAC`, + :class:`IPv4`, :class:`IPv6`) + - ISO8601 :class:`dates` and + :class:`datetime` + - :class:`UUID` + - :class:`comma-separated list` + - :class:`url` + + Parse actions: + + - :class:`convertToInteger` + - :class:`convertToFloat` + - :class:`convertToDate` + - :class:`convertToDatetime` + - :class:`stripHTMLTags` + - :class:`upcaseTokens` + - :class:`downcaseTokens` + + Example:: + + pyparsing_common.number.runTests(''' + # any int or real number, returned as the appropriate type + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.fnumber.runTests(''' + # any int or real number, returned as float + 100 + -100 + +100 + 3.14159 + 6.02e23 + 1e-12 + ''') + + pyparsing_common.hex_integer.runTests(''' + # hex numbers + 100 + FF + ''') + + pyparsing_common.fraction.runTests(''' + # fractions + 1/2 + -3/4 + ''') + + pyparsing_common.mixed_integer.runTests(''' + # mixed fractions + 1 + 1/2 + -3/4 + 1-3/4 + ''') + + import uuid + pyparsing_common.uuid.setParseAction(tokenMap(uuid.UUID)) + pyparsing_common.uuid.runTests(''' + # uuid + 12345678-1234-5678-1234-567812345678 + ''') + + prints:: + + # any int or real number, returned as the appropriate type + 100 + [100] + + -100 + [-100] + + +100 + [100] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # any int or real number, returned as float + 100 + [100.0] + + -100 + [-100.0] + + +100 + [100.0] + + 3.14159 + [3.14159] + + 6.02e23 + [6.02e+23] + + 1e-12 + [1e-12] + + # hex numbers + 100 + [256] + + FF + [255] + + # fractions + 1/2 + [0.5] + + -3/4 + [-0.75] + + # mixed fractions + 1 + [1] + + 1/2 + [0.5] + + -3/4 + [-0.75] + + 1-3/4 + [1.75] + + # uuid + 12345678-1234-5678-1234-567812345678 + [UUID('12345678-1234-5678-1234-567812345678')] + """ + + convert_to_integer = token_map(int) + """ + Parse action for converting parsed integers to Python int + """ + + convert_to_float = token_map(float) + """ + Parse action for converting parsed numbers 
to Python float + """ + + integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer) + """expression that parses an unsigned integer, returns an int""" + + hex_integer = ( + Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16)) + ) + """expression that parses a hexadecimal integer, returns an int""" + + signed_integer = ( + Regex(r"[+-]?\d+") + .set_name("signed integer") + .set_parse_action(convert_to_integer) + ) + """expression that parses an integer with optional leading sign, returns an int""" + + fraction = ( + signed_integer().set_parse_action(convert_to_float) + + "/" + + signed_integer().set_parse_action(convert_to_float) + ).set_name("fraction") + """fractional expression of an integer divided by an integer, returns a float""" + fraction.add_parse_action(lambda tt: tt[0] / tt[-1]) + + mixed_integer = ( + fraction | signed_integer + Opt(Opt("-").suppress() + fraction) + ).set_name("fraction or mixed integer-fraction") + """mixed integer of the form 'integer - fraction', with optional leading integer, returns float""" + mixed_integer.add_parse_action(sum) + + real = ( + Regex(r"[+-]?(?:\d+\.\d*|\.\d+)") + .set_name("real number") + .set_parse_action(convert_to_float) + ) + """expression that parses a floating point number and returns a float""" + + sci_real = ( + Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)") + .set_name("real number with scientific notation") + .set_parse_action(convert_to_float) + ) + """expression that parses a floating point number with optional + scientific notation and returns a float""" + + # streamlining this expression makes the docs nicer-looking + number = (sci_real | real | signed_integer).setName("number").streamline() + """any numeric expression, returns the corresponding Python type""" + + fnumber = ( + Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?") + .set_name("fnumber") + .set_parse_action(convert_to_float) + ) + """any int or real number, returned as float""" + + identifier = Word(identchars, identbodychars).set_name("identifier") + """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')""" + + ipv4_address = Regex( + r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}" + ).set_name("IPv4 address") + "IPv4 address (``0.0.0.0 - 255.255.255.255``)" + + _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer") + _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name( + "full IPv6 address" + ) + _short_ipv6_address = ( + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) + + "::" + + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6)) + ).set_name("short IPv6 address") + _short_ipv6_address.add_condition( + lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8 + ) + _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address") + ipv6_address = Combine( + (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name( + "IPv6 address" + ) + ).set_name("IPv6 address") + "IPv6 address (long, short, or mixed form)" + + mac_address = Regex( + r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}" + ).set_name("MAC address") + "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' 
delimiters)" + + @staticmethod + def convert_to_date(fmt: str = "%Y-%m-%d"): + """ + Helper to create a parse action for converting parsed date string to Python datetime.date + + Params - + - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``) + + Example:: + + date_expr = pyparsing_common.iso8601_date.copy() + date_expr.setParseAction(pyparsing_common.convertToDate()) + print(date_expr.parseString("1999-12-31")) + + prints:: + + [datetime.date(1999, 12, 31)] + """ + + def cvt_fn(ss, ll, tt): + try: + return datetime.strptime(tt[0], fmt).date() + except ValueError as ve: + raise ParseException(ss, ll, str(ve)) + + return cvt_fn + + @staticmethod + def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"): + """Helper to create a parse action for converting parsed + datetime string to Python datetime.datetime + + Params - + - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``) + + Example:: + + dt_expr = pyparsing_common.iso8601_datetime.copy() + dt_expr.setParseAction(pyparsing_common.convertToDatetime()) + print(dt_expr.parseString("1999-12-31T23:59:59.999")) + + prints:: + + [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)] + """ + + def cvt_fn(s, l, t): + try: + return datetime.strptime(t[0], fmt) + except ValueError as ve: + raise ParseException(s, l, str(ve)) + + return cvt_fn + + iso8601_date = Regex( + r"(?P\d{4})(?:-(?P\d\d)(?:-(?P\d\d))?)?" + ).set_name("ISO8601 date") + "ISO8601 date (``yyyy-mm-dd``)" + + iso8601_datetime = Regex( + r"(?P\d{4})-(?P\d\d)-(?P\d\d)[T ](?P\d\d):(?P\d\d)(:(?P\d\d(\.\d*)?)?)?(?PZ|[+-]\d\d:?\d\d)?" + ).set_name("ISO8601 datetime") + "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``" + + uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID") + "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)" + + _html_stripper = any_open_tag.suppress() | any_close_tag.suppress() + + @staticmethod + def strip_html_tags(s: str, l: int, tokens: ParseResults): + """Parse action to remove HTML tags from web page HTML source + + Example:: + + # strip HTML links from normal text + text = 'More info at the pyparsing wiki page' + td, td_end = makeHTMLTags("TD") + table_text = td + SkipTo(td_end).setParseAction(pyparsing_common.stripHTMLTags)("body") + td_end + print(table_text.parseString(text).body) + + Prints:: + + More info at the pyparsing wiki page + """ + return pyparsing_common._html_stripper.transform_string(tokens[0]) + + _commasepitem = ( + Combine( + OneOrMore( + ~Literal(",") + + ~LineEnd() + + Word(printables, exclude_chars=",") + + Opt(White(" \t") + ~FollowedBy(LineEnd() | ",")) + ) + ) + .streamline() + .set_name("commaItem") + ) + comma_separated_list = delimited_list( + Opt(quoted_string.copy() | _commasepitem, default="") + ).set_name("comma separated list") + """Predefined expression of 1 or more printable words or quoted strings, separated by commas.""" + + upcase_tokens = staticmethod(token_map(lambda t: t.upper())) + """Parse action to convert tokens to upper case.""" + + downcase_tokens = staticmethod(token_map(lambda t: t.lower())) + """Parse action to convert tokens to lower case.""" + + # fmt: off + url = Regex( + # https://mathiasbynens.be/demo/url-regex + # https://gist.github.com/dperini/729294 + r"^" + + # protocol identifier (optional) + # short syntax // still required + r"(?:(?:(?Phttps?|ftp):)?\/\/)" + + # user:pass BasicAuth (optional) + 
r"(?:(?P\S+(?::\S*)?)@)?" + + r"(?P" + + # IP address exclusion + # private & local networks + r"(?!(?:10|127)(?:\.\d{1,3}){3})" + + r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" + + r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" + + # IP address dotted notation octets + # excludes loopback network 0.0.0.0 + # excludes reserved space >= 224.0.0.0 + # excludes network & broadcast addresses + # (first & last IP address of each class) + r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" + + r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" + + r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" + + r"|" + + # host & domain names, may end with dot + # can be replaced by a shortest alternative + # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+ + r"(?:" + + r"(?:" + + r"[a-z0-9\u00a1-\uffff]" + + r"[a-z0-9\u00a1-\uffff_-]{0,62}" + + r")?" + + r"[a-z0-9\u00a1-\uffff]\." + + r")+" + + # TLD identifier name, may end with dot + r"(?:[a-z\u00a1-\uffff]{2,}\.?)" + + r")" + + # port number (optional) + r"(:(?P\d{2,5}))?" + + # resource path (optional) + r"(?P\/[^?# ]*)?" + + # query string (optional) + r"(\?(?P[^#]*))?" + + # fragment (optional) + r"(#(?P\S*))?" + + r"$" + ).set_name("url") + # fmt: on + + # pre-PEP8 compatibility names + convertToInteger = convert_to_integer + convertToFloat = convert_to_float + convertToDate = convert_to_date + convertToDatetime = convert_to_datetime + stripHTMLTags = strip_html_tags + upcaseTokens = upcase_tokens + downcaseTokens = downcase_tokens + + +_builtin_exprs = [ + v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement) +] diff --git a/src/poetry/core/_vendor/pyparsing/core.py b/src/poetry/core/_vendor/pyparsing/core.py new file mode 100644 index 0000000..9acba3f --- /dev/null +++ b/src/poetry/core/_vendor/pyparsing/core.py @@ -0,0 +1,5814 @@ +# +# core.py +# +import os +import typing +from typing import ( + NamedTuple, + Union, + Callable, + Any, + Generator, + Tuple, + List, + TextIO, + Set, + Sequence, +) +from abc import ABC, abstractmethod +from enum import Enum +import string +import copy +import warnings +import re +import sys +from collections.abc import Iterable +import traceback +import types +from operator import itemgetter +from functools import wraps +from threading import RLock +from pathlib import Path + +from .util import ( + _FifoCache, + _UnboundedCache, + __config_flags, + _collapse_string_to_ranges, + _escape_regex_range_chars, + _bslash, + _flatten, + LRUMemo as _LRUMemo, + UnboundedMemo as _UnboundedMemo, +) +from .exceptions import * +from .actions import * +from .results import ParseResults, _ParseResultsWithOffset +from .unicode import pyparsing_unicode + +_MAX_INT = sys.maxsize +str_type: Tuple[type, ...] = (str, bytes) + +# +# Copyright (c) 2003-2022 Paul T. McGuire +# +# Permission is hereby granted, free of charge, to any person obtaining +# a copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, +# distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to +# the following conditions: +# +# The above copyright notice and this permission notice shall be +# included in all copies or substantial portions of the Software. 
+# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY +# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, +# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE +# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# + + +if sys.version_info >= (3, 8): + from functools import cached_property +else: + + class cached_property: + def __init__(self, func): + self._func = func + + def __get__(self, instance, owner=None): + ret = instance.__dict__[self._func.__name__] = self._func(instance) + return ret + + +class __compat__(__config_flags): + """ + A cross-version compatibility configuration for pyparsing features that will be + released in a future version. By setting values in this configuration to True, + those features can be enabled in prior versions for compatibility development + and testing. + + - ``collect_all_And_tokens`` - flag to enable fix for Issue #63 that fixes erroneous grouping + of results names when an :class:`And` expression is nested within an :class:`Or` or :class:`MatchFirst`; + maintained for compatibility, but setting to ``False`` no longer restores pre-2.3.1 + behavior + """ + + _type_desc = "compatibility" + + collect_all_And_tokens = True + + _all_names = [__ for __ in locals() if not __.startswith("_")] + _fixed_names = """ + collect_all_And_tokens + """.split() + + +class __diag__(__config_flags): + _type_desc = "diagnostic" + + warn_multiple_tokens_in_named_alternation = False + warn_ungrouped_named_tokens_in_collection = False + warn_name_set_on_empty_Forward = False + warn_on_parse_using_empty_Forward = False + warn_on_assignment_to_Forward = False + warn_on_multiple_string_args_to_oneof = False + warn_on_match_first_with_lshift_operator = False + enable_debug_on_named_expressions = False + + _all_names = [__ for __ in locals() if not __.startswith("_")] + _warning_names = [name for name in _all_names if name.startswith("warn")] + _debug_names = [name for name in _all_names if name.startswith("enable_debug")] + + @classmethod + def enable_all_warnings(cls) -> None: + for name in cls._warning_names: + cls.enable(name) + + +class Diagnostics(Enum): + """ + Diagnostic configuration (all default to disabled) + - ``warn_multiple_tokens_in_named_alternation`` - flag to enable warnings when a results + name is defined on a :class:`MatchFirst` or :class:`Or` expression with one or more :class:`And` subexpressions + - ``warn_ungrouped_named_tokens_in_collection`` - flag to enable warnings when a results + name is defined on a containing expression with ungrouped subexpressions that also + have results names + - ``warn_name_set_on_empty_Forward`` - flag to enable warnings when a :class:`Forward` is defined + with a results name, but has no contents defined + - ``warn_on_parse_using_empty_Forward`` - flag to enable warnings when a :class:`Forward` is + defined in a grammar but has never had an expression attached to it + - ``warn_on_assignment_to_Forward`` - flag to enable warnings when a :class:`Forward` is defined + but is overwritten by assigning using ``'='`` instead of ``'<<='`` or ``'<<'`` + - ``warn_on_multiple_string_args_to_oneof`` - flag to enable warnings when :class:`one_of` is + incorrectly called with multiple str arguments + - ``enable_debug_on_named_expressions`` - flag to auto-enable debug 
on all subsequent + calls to :class:`ParserElement.set_name` + + Diagnostics are enabled/disabled by calling :class:`enable_diag` and :class:`disable_diag`. + All warnings can be enabled by calling :class:`enable_all_warnings`. + """ + + warn_multiple_tokens_in_named_alternation = 0 + warn_ungrouped_named_tokens_in_collection = 1 + warn_name_set_on_empty_Forward = 2 + warn_on_parse_using_empty_Forward = 3 + warn_on_assignment_to_Forward = 4 + warn_on_multiple_string_args_to_oneof = 5 + warn_on_match_first_with_lshift_operator = 6 + enable_debug_on_named_expressions = 7 + + +def enable_diag(diag_enum: Diagnostics) -> None: + """ + Enable a global pyparsing diagnostic flag (see :class:`Diagnostics`). + """ + __diag__.enable(diag_enum.name) + + +def disable_diag(diag_enum: Diagnostics) -> None: + """ + Disable a global pyparsing diagnostic flag (see :class:`Diagnostics`). + """ + __diag__.disable(diag_enum.name) + + +def enable_all_warnings() -> None: + """ + Enable all global pyparsing diagnostic warnings (see :class:`Diagnostics`). + """ + __diag__.enable_all_warnings() + + +# hide abstract class +del __config_flags + + +def _should_enable_warnings( + cmd_line_warn_options: typing.Iterable[str], warn_env_var: typing.Optional[str] +) -> bool: + enable = bool(warn_env_var) + for warn_opt in cmd_line_warn_options: + w_action, w_message, w_category, w_module, w_line = (warn_opt + "::::").split( + ":" + )[:5] + if not w_action.lower().startswith("i") and ( + not (w_message or w_category or w_module) or w_module == "pyparsing" + ): + enable = True + elif w_action.lower().startswith("i") and w_module in ("pyparsing", ""): + enable = False + return enable + + +if _should_enable_warnings( + sys.warnoptions, os.environ.get("PYPARSINGENABLEALLWARNINGS") +): + enable_all_warnings() + + +# build list of single arg builtins, that can be used as parse actions +_single_arg_builtins = { + sum, + len, + sorted, + reversed, + list, + tuple, + set, + any, + all, + min, + max, +} + +_generatorType = types.GeneratorType +ParseAction = Union[ + Callable[[], Any], + Callable[[ParseResults], Any], + Callable[[int, ParseResults], Any], + Callable[[str, int, ParseResults], Any], +] +ParseCondition = Union[ + Callable[[], bool], + Callable[[ParseResults], bool], + Callable[[int, ParseResults], bool], + Callable[[str, int, ParseResults], bool], +] +ParseFailAction = Callable[[str, int, "ParserElement", Exception], None] +DebugStartAction = Callable[[str, int, "ParserElement", bool], None] +DebugSuccessAction = Callable[ + [str, int, int, "ParserElement", ParseResults, bool], None +] +DebugExceptionAction = Callable[[str, int, "ParserElement", Exception, bool], None] + + +alphas = string.ascii_uppercase + string.ascii_lowercase +identchars = pyparsing_unicode.Latin1.identchars +identbodychars = pyparsing_unicode.Latin1.identbodychars +nums = "0123456789" +hexnums = nums + "ABCDEFabcdef" +alphanums = alphas + nums +printables = "".join([c for c in string.printable if c not in string.whitespace]) + +_trim_arity_call_line: traceback.StackSummary = None + + +def _trim_arity(func, max_limit=3): + """decorator to trim function calls to match the arity of the target""" + global _trim_arity_call_line + + if func in _single_arg_builtins: + return lambda s, l, t: func(t) + + limit = 0 + found_arity = False + + def extract_tb(tb, limit=0): + frames = traceback.extract_tb(tb, limit=limit) + frame_summary = frames[-1] + return [frame_summary[:2]] + + # synthesize what would be returned by traceback.extract_stack at the call to + # 
user's parse action 'func', so that we don't incur call penalty at parse time + + # fmt: off + LINE_DIFF = 7 + # IF ANY CODE CHANGES, EVEN JUST COMMENTS OR BLANK LINES, BETWEEN THE NEXT LINE AND + # THE CALL TO FUNC INSIDE WRAPPER, LINE_DIFF MUST BE MODIFIED!!!! + _trim_arity_call_line = (_trim_arity_call_line or traceback.extract_stack(limit=2)[-1]) + pa_call_line_synth = (_trim_arity_call_line[0], _trim_arity_call_line[1] + LINE_DIFF) + + def wrapper(*args): + nonlocal found_arity, limit + while 1: + try: + ret = func(*args[limit:]) + found_arity = True + return ret + except TypeError as te: + # re-raise TypeErrors if they did not come from our arity testing + if found_arity: + raise + else: + tb = te.__traceback__ + trim_arity_type_error = ( + extract_tb(tb, limit=2)[-1][:2] == pa_call_line_synth + ) + del tb + + if trim_arity_type_error: + if limit < max_limit: + limit += 1 + continue + + raise + # fmt: on + + # copy func name to wrapper for sensible debug output + # (can't use functools.wraps, since that messes with function signature) + func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) + wrapper.__name__ = func_name + wrapper.__doc__ = func.__doc__ + + return wrapper + + +def condition_as_parse_action( + fn: ParseCondition, message: str = None, fatal: bool = False +) -> ParseAction: + """ + Function to convert a simple predicate function that returns ``True`` or ``False`` + into a parse action. Can be used in places when a parse action is required + and :class:`ParserElement.add_condition` cannot be used (such as when adding a condition + to an operator level in :class:`infix_notation`). + + Optional keyword arguments: + + - ``message`` - define a custom message to be used in the raised exception + - ``fatal`` - if True, will raise :class:`ParseFatalException` to stop parsing immediately; + otherwise will raise :class:`ParseException` + + """ + msg = message if message is not None else "failed user-defined condition" + exc_type = ParseFatalException if fatal else ParseException + fn = _trim_arity(fn) + + @wraps(fn) + def pa(s, l, t): + if not bool(fn(s, l, t)): + raise exc_type(s, l, msg) + + return pa + + +def _default_start_debug_action( + instring: str, loc: int, expr: "ParserElement", cache_hit: bool = False +): + cache_hit_str = "*" if cache_hit else "" + print( + ( + "{}Match {} at loc {}({},{})\n {}\n {}^".format( + cache_hit_str, + expr, + loc, + lineno(loc, instring), + col(loc, instring), + line(loc, instring), + " " * (col(loc, instring) - 1), + ) + ) + ) + + +def _default_success_debug_action( + instring: str, + startloc: int, + endloc: int, + expr: "ParserElement", + toks: ParseResults, + cache_hit: bool = False, +): + cache_hit_str = "*" if cache_hit else "" + print("{}Matched {} -> {}".format(cache_hit_str, expr, toks.as_list())) + + +def _default_exception_debug_action( + instring: str, + loc: int, + expr: "ParserElement", + exc: Exception, + cache_hit: bool = False, +): + cache_hit_str = "*" if cache_hit else "" + print( + "{}Match {} failed, {} raised: {}".format( + cache_hit_str, expr, type(exc).__name__, exc + ) + ) + + +def null_debug_action(*args): + """'Do-nothing' debug action, to suppress debugging output during parsing.""" + + +class ParserElement(ABC): + """Abstract base level parser element class.""" + + DEFAULT_WHITE_CHARS: str = " \n\t\r" + verbose_stacktrace: bool = False + _literalStringClass: typing.Optional[type] = None + + @staticmethod + def set_default_whitespace_chars(chars: str) -> None: + r""" + Overrides the default 
whitespace chars + + Example:: + + # default whitespace chars are space, and newline + Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def', 'ghi', 'jkl'] + + # change to just treat newline as significant + ParserElement.set_default_whitespace_chars(" \t") + Word(alphas)[1, ...].parse_string("abc def\nghi jkl") # -> ['abc', 'def'] + """ + ParserElement.DEFAULT_WHITE_CHARS = chars + + # update whitespace all parse expressions defined in this module + for expr in _builtin_exprs: + if expr.copyDefaultWhiteChars: + expr.whiteChars = set(chars) + + @staticmethod + def inline_literals_using(cls: type) -> None: + """ + Set class to be used for inclusion of string literals into a parser. + + Example:: + + # default literal class used is Literal + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parse_string("1999/12/31") # -> ['1999', '/', '12', '/', '31'] + + + # change to Suppress + ParserElement.inline_literals_using(Suppress) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + date_str.parse_string("1999/12/31") # -> ['1999', '12', '31'] + """ + ParserElement._literalStringClass = cls + + class DebugActions(NamedTuple): + debug_try: typing.Optional[DebugStartAction] + debug_match: typing.Optional[DebugSuccessAction] + debug_fail: typing.Optional[DebugExceptionAction] + + def __init__(self, savelist: bool = False): + self.parseAction: List[ParseAction] = list() + self.failAction: typing.Optional[ParseFailAction] = None + self.customName = None + self._defaultName = None + self.resultsName = None + self.saveAsList = savelist + self.skipWhitespace = True + self.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) + self.copyDefaultWhiteChars = True + # used when checking for left-recursion + self.mayReturnEmpty = False + self.keepTabs = False + self.ignoreExprs: List["ParserElement"] = list() + self.debug = False + self.streamlined = False + # optimize exception handling for subclasses that don't advance parse index + self.mayIndexError = True + self.errmsg = "" + # mark results names as modal (report only last) or cumulative (list all) + self.modalResults = True + # custom debug actions + self.debugActions = self.DebugActions(None, None, None) + # avoid redundant calls to preParse + self.callPreparse = True + self.callDuringTry = False + self.suppress_warnings_: List[Diagnostics] = [] + + def suppress_warning(self, warning_type: Diagnostics) -> "ParserElement": + """ + Suppress warnings emitted for a particular diagnostic on this expression. + + Example:: + + base = pp.Forward() + base.suppress_warning(Diagnostics.warn_on_parse_using_empty_Forward) + + # statement would normally raise a warning, but is now suppressed + print(base.parseString("x")) + + """ + self.suppress_warnings_.append(warning_type) + return self + + def copy(self) -> "ParserElement": + """ + Make a copy of this :class:`ParserElement`. Useful for defining + different parse actions for the same parsing pattern, using copies of + the original parse element. 
+ + Example:: + + integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) + integerK = integer.copy().add_parse_action(lambda toks: toks[0] * 1024) + Suppress("K") + integerM = integer.copy().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") + + print((integerK | integerM | integer)[1, ...].parse_string("5K 100 640K 256M")) + + prints:: + + [5120, 100, 655360, 268435456] + + Equivalent form of ``expr.copy()`` is just ``expr()``:: + + integerM = integer().add_parse_action(lambda toks: toks[0] * 1024 * 1024) + Suppress("M") + """ + cpy = copy.copy(self) + cpy.parseAction = self.parseAction[:] + cpy.ignoreExprs = self.ignoreExprs[:] + if self.copyDefaultWhiteChars: + cpy.whiteChars = set(ParserElement.DEFAULT_WHITE_CHARS) + return cpy + + def set_results_name( + self, name: str, list_all_matches: bool = False, *, listAllMatches: bool = False + ) -> "ParserElement": + """ + Define name for referencing matching tokens as a nested attribute + of the returned parse results. + + Normally, results names are assigned as you would assign keys in a dict: + any existing value is overwritten by later values. If it is necessary to + keep all values captured for a particular results name, call ``set_results_name`` + with ``list_all_matches`` = True. + + NOTE: ``set_results_name`` returns a *copy* of the original :class:`ParserElement` object; + this is so that the client can define a basic element, such as an + integer, and reference it in multiple places with different names. + + You can also set results names using the abbreviated syntax, + ``expr("name")`` in place of ``expr.set_results_name("name")`` + - see :class:`__call__`. If ``list_all_matches`` is required, use + ``expr("name*")``. + + Example:: + + date_str = (integer.set_results_name("year") + '/' + + integer.set_results_name("month") + '/' + + integer.set_results_name("day")) + + # equivalent form: + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + """ + listAllMatches = listAllMatches or list_all_matches + return self._setResultsName(name, listAllMatches) + + def _setResultsName(self, name, listAllMatches=False): + if name is None: + return self + newself = self.copy() + if name.endswith("*"): + name = name[:-1] + listAllMatches = True + newself.resultsName = name + newself.modalResults = not listAllMatches + return newself + + def set_break(self, break_flag: bool = True) -> "ParserElement": + """ + Method to invoke the Python pdb debugger when this element is + about to be parsed. Set ``break_flag`` to ``True`` to enable, ``False`` to + disable. + """ + if break_flag: + _parseMethod = self._parse + + def breaker(instring, loc, doActions=True, callPreParse=True): + import pdb + + # this call to pdb.set_trace() is intentional, not a checkin error + pdb.set_trace() + return _parseMethod(instring, loc, doActions, callPreParse) + + breaker._originalParseMethod = _parseMethod + self._parse = breaker + else: + if hasattr(self._parse, "_originalParseMethod"): + self._parse = self._parse._originalParseMethod + return self + + def set_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement": + """ + Define one or more actions to perform when successfully matching parse element definition. + + Parse actions can be called to perform data conversions, do extra validation, + update external data structures, or enhance or replace the parsed tokens. 
+ Each parse action ``fn`` is a callable method with 0-3 arguments, called as + ``fn(s, loc, toks)`` , ``fn(loc, toks)`` , ``fn(toks)`` , or just ``fn()`` , where: + + - s = the original string being parsed (see note below) + - loc = the location of the matching substring + - toks = a list of the matched tokens, packaged as a :class:`ParseResults` object + + The parsed tokens are passed to the parse action as ParseResults. They can be + modified in place using list-style append, extend, and pop operations to update + the parsed list elements; and with dictionary-style item set and del operations + to add, update, or remove any named results. If the tokens are modified in place, + it is not necessary to return them with a return statement. + + Parse actions can also completely replace the given tokens, with another ``ParseResults`` + object, or with some entirely different object (common for parse actions that perform data + conversions). A convenient way to build a new parse result is to define the values + using a dict, and then create the return value using :class:`ParseResults.from_dict`. + + If None is passed as the ``fn`` parse action, all previously added parse actions for this + expression are cleared. + + Optional keyword arguments: + + - call_during_try = (default= ``False``) indicate if parse action should be run during + lookaheads and alternate testing. For parse actions that have side effects, it is + important to only call the parse action once it is determined that it is being + called as part of a successful parse. For parse actions that perform additional + validation, then call_during_try should be passed as True, so that the validation + code is included in the preliminary "try" parses. + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See :class:`parse_string` for more + information on parsing strings containing ```` s, and suggested + methods to maintain a consistent view of the parsed string, the parse + location, and line and column positions within the parsed string. + + Example:: + + # parse dates in the form YYYY/MM/DD + + # use parse action to convert toks from str to int at parse time + def convert_to_int(toks): + return int(toks[0]) + + # use a parse action to verify that the date is a valid date + def is_valid_date(instring, loc, toks): + from datetime import date + year, month, day = toks[::2] + try: + date(year, month, day) + except ValueError: + raise ParseException(instring, loc, "invalid date given") + + integer = Word(nums) + date_str = integer + '/' + integer + '/' + integer + + # add parse actions + integer.set_parse_action(convert_to_int) + date_str.set_parse_action(is_valid_date) + + # note that integer fields are now ints, not strings + date_str.run_tests(''' + # successful parse - note that integer fields were converted to ints + 1999/12/31 + + # fail - invalid date + 1999/13/31 + ''') + """ + if list(fns) == [None]: + self.parseAction = [] + else: + if not all(callable(fn) for fn in fns): + raise TypeError("parse actions must be callable") + self.parseAction = [_trim_arity(fn) for fn in fns] + self.callDuringTry = kwargs.get( + "call_during_try", kwargs.get("callDuringTry", False) + ) + return self + + def add_parse_action(self, *fns: ParseAction, **kwargs) -> "ParserElement": + """ + Add one or more parse actions to expression's list of parse actions. See :class:`set_parse_action`. + + See examples in :class:`copy`. 
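+
+ A minimal illustrative sketch (the names and values here are hypothetical)::
+
+     integer = Word(nums).set_parse_action(lambda toks: int(toks[0]))
+     # the added action runs after the string-to-int conversion above
+     integer.add_parse_action(lambda toks: toks[0] * 2)
+     integer.parse_string("21")  # -> [42]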
+ """ + self.parseAction += [_trim_arity(fn) for fn in fns] + self.callDuringTry = self.callDuringTry or kwargs.get( + "call_during_try", kwargs.get("callDuringTry", False) + ) + return self + + def add_condition(self, *fns: ParseCondition, **kwargs) -> "ParserElement": + """Add a boolean predicate function to expression's list of parse actions. See + :class:`set_parse_action` for function call signatures. Unlike ``set_parse_action``, + functions passed to ``add_condition`` need to return boolean success/fail of the condition. + + Optional keyword arguments: + + - message = define a custom message to be used in the raised exception + - fatal = if True, will raise ParseFatalException to stop parsing immediately; otherwise will raise + ParseException + - call_during_try = boolean to indicate if this method should be called during internal tryParse calls, + default=False + + Example:: + + integer = Word(nums).set_parse_action(lambda toks: int(toks[0])) + year_int = integer.copy() + year_int.add_condition(lambda toks: toks[0] >= 2000, message="Only support years 2000 and later") + date_str = year_int + '/' + integer + '/' + integer + + result = date_str.parse_string("1999/12/31") # -> Exception: Only support years 2000 and later (at char 0), + (line:1, col:1) + """ + for fn in fns: + self.parseAction.append( + condition_as_parse_action( + fn, message=kwargs.get("message"), fatal=kwargs.get("fatal", False) + ) + ) + + self.callDuringTry = self.callDuringTry or kwargs.get( + "call_during_try", kwargs.get("callDuringTry", False) + ) + return self + + def set_fail_action(self, fn: ParseFailAction) -> "ParserElement": + """ + Define action to perform if parsing fails at this expression. + Fail acton fn is a callable function that takes the arguments + ``fn(s, loc, expr, err)`` where: + + - s = string being parsed + - loc = location where expression match was attempted and failed + - expr = the parse expression that failed + - err = the exception thrown + + The function returns no value. 
It may throw :class:`ParseFatalException` + if it is desired to stop parsing immediately.""" + self.failAction = fn + return self + + def _skipIgnorables(self, instring, loc): + exprsFound = True + while exprsFound: + exprsFound = False + for e in self.ignoreExprs: + try: + while 1: + loc, dummy = e._parse(instring, loc) + exprsFound = True + except ParseException: + pass + return loc + + def preParse(self, instring, loc): + if self.ignoreExprs: + loc = self._skipIgnorables(instring, loc) + + if self.skipWhitespace: + instrlen = len(instring) + white_chars = self.whiteChars + while loc < instrlen and instring[loc] in white_chars: + loc += 1 + + return loc + + def parseImpl(self, instring, loc, doActions=True): + return loc, [] + + def postParse(self, instring, loc, tokenlist): + return tokenlist + + # @profile + def _parseNoCache( + self, instring, loc, doActions=True, callPreParse=True + ) -> Tuple[int, ParseResults]: + TRY, MATCH, FAIL = 0, 1, 2 + debugging = self.debug # and doActions) + len_instring = len(instring) + + if debugging or self.failAction: + # print("Match {} at loc {}({}, {})".format(self, loc, lineno(loc, instring), col(loc, instring))) + try: + if callPreParse and self.callPreparse: + pre_loc = self.preParse(instring, loc) + else: + pre_loc = loc + tokens_start = pre_loc + if self.debugActions.debug_try: + self.debugActions.debug_try(instring, tokens_start, self, False) + if self.mayIndexError or pre_loc >= len_instring: + try: + loc, tokens = self.parseImpl(instring, pre_loc, doActions) + except IndexError: + raise ParseException(instring, len_instring, self.errmsg, self) + else: + loc, tokens = self.parseImpl(instring, pre_loc, doActions) + except Exception as err: + # print("Exception raised:", err) + if self.debugActions.debug_fail: + self.debugActions.debug_fail( + instring, tokens_start, self, err, False + ) + if self.failAction: + self.failAction(instring, tokens_start, self, err) + raise + else: + if callPreParse and self.callPreparse: + pre_loc = self.preParse(instring, loc) + else: + pre_loc = loc + tokens_start = pre_loc + if self.mayIndexError or pre_loc >= len_instring: + try: + loc, tokens = self.parseImpl(instring, pre_loc, doActions) + except IndexError: + raise ParseException(instring, len_instring, self.errmsg, self) + else: + loc, tokens = self.parseImpl(instring, pre_loc, doActions) + + tokens = self.postParse(instring, loc, tokens) + + ret_tokens = ParseResults( + tokens, self.resultsName, asList=self.saveAsList, modal=self.modalResults + ) + if self.parseAction and (doActions or self.callDuringTry): + if debugging: + try: + for fn in self.parseAction: + try: + tokens = fn(instring, tokens_start, ret_tokens) + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + raise exc from parse_action_exc + + if tokens is not None and tokens is not ret_tokens: + ret_tokens = ParseResults( + tokens, + self.resultsName, + asList=self.saveAsList + and isinstance(tokens, (ParseResults, list)), + modal=self.modalResults, + ) + except Exception as err: + # print "Exception raised in user parse action:", err + if self.debugActions.debug_fail: + self.debugActions.debug_fail( + instring, tokens_start, self, err, False + ) + raise + else: + for fn in self.parseAction: + try: + tokens = fn(instring, tokens_start, ret_tokens) + except IndexError as parse_action_exc: + exc = ParseException("exception raised in parse action") + raise exc from parse_action_exc + + if tokens is not None and tokens is not ret_tokens: + ret_tokens = 
ParseResults( + tokens, + self.resultsName, + asList=self.saveAsList + and isinstance(tokens, (ParseResults, list)), + modal=self.modalResults, + ) + if debugging: + # print("Matched", self, "->", ret_tokens.as_list()) + if self.debugActions.debug_match: + self.debugActions.debug_match( + instring, tokens_start, loc, self, ret_tokens, False + ) + + return loc, ret_tokens + + def try_parse(self, instring: str, loc: int, raise_fatal: bool = False) -> int: + try: + return self._parse(instring, loc, doActions=False)[0] + except ParseFatalException: + if raise_fatal: + raise + raise ParseException(instring, loc, self.errmsg, self) + + def can_parse_next(self, instring: str, loc: int) -> bool: + try: + self.try_parse(instring, loc) + except (ParseException, IndexError): + return False + else: + return True + + # cache for left-recursion in Forward references + recursion_lock = RLock() + recursion_memos: typing.Dict[ + Tuple[int, "Forward", bool], Tuple[int, Union[ParseResults, Exception]] + ] = {} + + # argument cache for optimizing repeated calls when backtracking through recursive expressions + packrat_cache = ( + {} + ) # this is set later by enabled_packrat(); this is here so that reset_cache() doesn't fail + packrat_cache_lock = RLock() + packrat_cache_stats = [0, 0] + + # this method gets repeatedly called during backtracking with the same arguments - + # we can cache these arguments and save ourselves the trouble of re-parsing the contained expression + def _parseCache( + self, instring, loc, doActions=True, callPreParse=True + ) -> Tuple[int, ParseResults]: + HIT, MISS = 0, 1 + TRY, MATCH, FAIL = 0, 1, 2 + lookup = (self, instring, loc, callPreParse, doActions) + with ParserElement.packrat_cache_lock: + cache = ParserElement.packrat_cache + value = cache.get(lookup) + if value is cache.not_in_cache: + ParserElement.packrat_cache_stats[MISS] += 1 + try: + value = self._parseNoCache(instring, loc, doActions, callPreParse) + except ParseBaseException as pe: + # cache a copy of the exception, without the traceback + cache.set(lookup, pe.__class__(*pe.args)) + raise + else: + cache.set(lookup, (value[0], value[1].copy(), loc)) + return value + else: + ParserElement.packrat_cache_stats[HIT] += 1 + if self.debug and self.debugActions.debug_try: + try: + self.debugActions.debug_try(instring, loc, self, cache_hit=True) + except TypeError: + pass + if isinstance(value, Exception): + if self.debug and self.debugActions.debug_fail: + try: + self.debugActions.debug_fail( + instring, loc, self, value, cache_hit=True + ) + except TypeError: + pass + raise value + + loc_, result, endloc = value[0], value[1].copy(), value[2] + if self.debug and self.debugActions.debug_match: + try: + self.debugActions.debug_match( + instring, loc_, endloc, self, result, cache_hit=True + ) + except TypeError: + pass + + return loc_, result + + _parse = _parseNoCache + + @staticmethod + def reset_cache() -> None: + ParserElement.packrat_cache.clear() + ParserElement.packrat_cache_stats[:] = [0] * len( + ParserElement.packrat_cache_stats + ) + ParserElement.recursion_memos.clear() + + _packratEnabled = False + _left_recursion_enabled = False + + @staticmethod + def disable_memoization() -> None: + """ + Disables active Packrat or Left Recursion parsing and their memoization + + This method also works if neither Packrat nor Left Recursion are enabled. + This makes it safe to call before activating Packrat nor Left Recursion + to clear any previous settings. 
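+
+ Example (an illustrative sketch)::
+
+     import pyparsing as pp
+     pp.ParserElement.enable_packrat()
+     # ... later, drop all memoization state and settings before re-enabling either mode
+     pp.ParserElement.disable_memoization()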
+ """ + ParserElement.reset_cache() + ParserElement._left_recursion_enabled = False + ParserElement._packratEnabled = False + ParserElement._parse = ParserElement._parseNoCache + + @staticmethod + def enable_left_recursion( + cache_size_limit: typing.Optional[int] = None, *, force=False + ) -> None: + """ + Enables "bounded recursion" parsing, which allows for both direct and indirect + left-recursion. During parsing, left-recursive :class:`Forward` elements are + repeatedly matched with a fixed recursion depth that is gradually increased + until finding the longest match. + + Example:: + + import pyparsing as pp + pp.ParserElement.enable_left_recursion() + + E = pp.Forward("E") + num = pp.Word(pp.nums) + # match `num`, or `num '+' num`, or `num '+' num '+' num`, ... + E <<= E + '+' - num | num + + print(E.parse_string("1+2+3")) + + Recursion search naturally memoizes matches of ``Forward`` elements and may + thus skip reevaluation of parse actions during backtracking. This may break + programs with parse actions which rely on strict ordering of side-effects. + + Parameters: + + - cache_size_limit - (default=``None``) - memoize at most this many + ``Forward`` elements during matching; if ``None`` (the default), + memoize all ``Forward`` elements. + + Bounded Recursion parsing works similar but not identical to Packrat parsing, + thus the two cannot be used together. Use ``force=True`` to disable any + previous, conflicting settings. + """ + if force: + ParserElement.disable_memoization() + elif ParserElement._packratEnabled: + raise RuntimeError("Packrat and Bounded Recursion are not compatible") + if cache_size_limit is None: + ParserElement.recursion_memos = _UnboundedMemo() + elif cache_size_limit > 0: + ParserElement.recursion_memos = _LRUMemo(capacity=cache_size_limit) + else: + raise NotImplementedError("Memo size of %s" % cache_size_limit) + ParserElement._left_recursion_enabled = True + + @staticmethod + def enable_packrat(cache_size_limit: int = 128, *, force: bool = False) -> None: + """ + Enables "packrat" parsing, which adds memoizing to the parsing logic. + Repeated parse attempts at the same string location (which happens + often in many complex grammars) can immediately return a cached value, + instead of re-executing parsing/validating code. Memoizing is done of + both valid results and parsing exceptions. + + Parameters: + + - cache_size_limit - (default= ``128``) - if an integer value is provided + will limit the size of the packrat cache; if None is passed, then + the cache size will be unbounded; if 0 is passed, the cache will + be effectively disabled. + + This speedup may break existing programs that use parse actions that + have side-effects. For this reason, packrat parsing is disabled when + you first import pyparsing. To activate the packrat feature, your + program must call the class method :class:`ParserElement.enable_packrat`. + For best results, call ``enable_packrat()`` immediately after + importing pyparsing. + + Example:: + + import pyparsing + pyparsing.ParserElement.enable_packrat() + + Packrat parsing works similar but not identical to Bounded Recursion parsing, + thus the two cannot be used together. Use ``force=True`` to disable any + previous, conflicting settings. 
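+
+ For instance (an illustrative sketch)::
+
+     import pyparsing as pp
+     pp.ParserElement.enable_left_recursion()
+     # switching strategies later requires force=True to clear the earlier setting
+     pp.ParserElement.enable_packrat(force=True)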
+ """ + if force: + ParserElement.disable_memoization() + elif ParserElement._left_recursion_enabled: + raise RuntimeError("Packrat and Bounded Recursion are not compatible") + if not ParserElement._packratEnabled: + ParserElement._packratEnabled = True + if cache_size_limit is None: + ParserElement.packrat_cache = _UnboundedCache() + else: + ParserElement.packrat_cache = _FifoCache(cache_size_limit) + ParserElement._parse = ParserElement._parseCache + + def parse_string( + self, instring: str, parse_all: bool = False, *, parseAll: bool = False + ) -> ParseResults: + """ + Parse a string with respect to the parser definition. This function is intended as the primary interface to the + client code. + + :param instring: The input string to be parsed. + :param parse_all: If set, the entire input string must match the grammar. + :param parseAll: retained for pre-PEP8 compatibility, will be removed in a future release. + :raises ParseException: Raised if ``parse_all`` is set and the input string does not match the whole grammar. + :returns: the parsed data as a :class:`ParseResults` object, which may be accessed as a `list`, a `dict`, or + an object with attributes if the given parser includes results names. + + If the input string is required to match the entire grammar, ``parse_all`` flag must be set to ``True``. This + is also equivalent to ending the grammar with :class:`StringEnd`(). + + To report proper column numbers, ``parse_string`` operates on a copy of the input string where all tabs are + converted to spaces (8 spaces per tab, as per the default in ``string.expandtabs``). If the input string + contains tabs and the grammar uses parse actions that use the ``loc`` argument to index into the string + being parsed, one can ensure a consistent view of the input string by doing one of the following: + + - calling ``parse_with_tabs`` on your grammar before calling ``parse_string`` (see :class:`parse_with_tabs`), + - define your parse action using the full ``(s,loc,toks)`` signature, and reference the input string using the + parse action's ``s`` argument, or + - explicitly expand the tabs in your input string before calling ``parse_string``. + + Examples: + + By default, partial matches are OK. + + >>> res = Word('a').parse_string('aaaaabaaa') + >>> print(res) + ['aaaaa'] + + The parsing behavior varies by the inheriting class of this abstract class. Please refer to the children + directly to see more examples. + + It raises an exception if parse_all flag is set and instring does not match the whole grammar. + + >>> res = Word('a').parse_string('aaaaabaaa', parse_all=True) + Traceback (most recent call last): + ... 
+ pyparsing.ParseException: Expected end of text, found 'b' (at char 5), (line:1, col:6) + """ + parseAll = parse_all or parseAll + + ParserElement.reset_cache() + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + if not self.keepTabs: + instring = instring.expandtabs() + try: + loc, tokens = self._parse(instring, 0) + if parseAll: + loc = self.preParse(instring, loc) + se = Empty() + StringEnd() + se._parse(instring, loc) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clearing out pyparsing internal stack trace + raise exc.with_traceback(None) + else: + return tokens + + def scan_string( + self, + instring: str, + max_matches: int = _MAX_INT, + overlap: bool = False, + *, + debug: bool = False, + maxMatches: int = _MAX_INT, + ) -> Generator[Tuple[ParseResults, int, int], None, None]: + """ + Scan the input string for expression matches. Each match will return the + matching tokens, start location, and end location. May be called with optional + ``max_matches`` argument, to clip scanning after 'n' matches are found. If + ``overlap`` is specified, then overlapping matches will be reported. + + Note that the start and end locations are reported relative to the string + being parsed. See :class:`parse_string` for more information on parsing + strings with embedded tabs. + + Example:: + + source = "sldjf123lsdjjkf345sldkjf879lkjsfd987" + print(source) + for tokens, start, end in Word(alphas).scan_string(source): + print(' '*start + '^'*(end-start)) + print(' '*start + tokens[0]) + + prints:: + + sldjf123lsdjjkf345sldkjf879lkjsfd987 + ^^^^^ + sldjf + ^^^^^^^ + lsdjjkf + ^^^^^^ + sldkjf + ^^^^^^ + lkjsfd + """ + maxMatches = min(maxMatches, max_matches) + if not self.streamlined: + self.streamline() + for e in self.ignoreExprs: + e.streamline() + + if not self.keepTabs: + instring = str(instring).expandtabs() + instrlen = len(instring) + loc = 0 + preparseFn = self.preParse + parseFn = self._parse + ParserElement.resetCache() + matches = 0 + try: + while loc <= instrlen and matches < maxMatches: + try: + preloc = preparseFn(instring, loc) + nextLoc, tokens = parseFn(instring, preloc, callPreParse=False) + except ParseException: + loc = preloc + 1 + else: + if nextLoc > loc: + matches += 1 + if debug: + print( + { + "tokens": tokens.asList(), + "start": preloc, + "end": nextLoc, + } + ) + yield tokens, preloc, nextLoc + if overlap: + nextloc = preparseFn(instring, loc) + if nextloc > loc: + loc = nextLoc + else: + loc += 1 + else: + loc = nextLoc + else: + loc = preloc + 1 + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def transform_string(self, instring: str, *, debug: bool = False) -> str: + """ + Extension to :class:`scan_string`, to modify matching text with modified tokens that may + be returned from a parse action. To use ``transform_string``, define a grammar and + attach a parse action to it that modifies the returned token list. + Invoking ``transform_string()`` on a target string will then scan for matches, + and replace the matched text patterns according to the logic in the parse + action. ``transform_string()`` returns the resulting transformed string. 
+ + Example:: + + wd = Word(alphas) + wd.set_parse_action(lambda toks: toks[0].title()) + + print(wd.transform_string("now is the winter of our discontent made glorious summer by this sun of york.")) + + prints:: + + Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York. + """ + out: List[str] = [] + lastE = 0 + # force preservation of s, to minimize unwanted transformation of string, and to + # keep string locs straight between transform_string and scan_string + self.keepTabs = True + try: + for t, s, e in self.scan_string(instring, debug=debug): + out.append(instring[lastE:s]) + if t: + if isinstance(t, ParseResults): + out += t.as_list() + elif isinstance(t, Iterable) and not isinstance(t, str_type): + out.extend(t) + else: + out.append(t) + lastE = e + out.append(instring[lastE:]) + out = [o for o in out if o] + return "".join([str(s) for s in _flatten(out)]) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def search_string( + self, + instring: str, + max_matches: int = _MAX_INT, + *, + debug: bool = False, + maxMatches: int = _MAX_INT, + ) -> ParseResults: + """ + Another extension to :class:`scan_string`, simplifying the access to the tokens found + to match the given parse expression. May be called with optional + ``max_matches`` argument, to clip searching after 'n' matches are found. + + Example:: + + # a capitalized word starts with an uppercase letter, followed by zero or more lowercase letters + cap_word = Word(alphas.upper(), alphas.lower()) + + print(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity")) + + # the sum() builtin can be used to merge results into a single ParseResults object + print(sum(cap_word.search_string("More than Iron, more than Lead, more than Gold I need Electricity"))) + + prints:: + + [['More'], ['Iron'], ['Lead'], ['Gold'], ['I'], ['Electricity']] + ['More', 'Iron', 'Lead', 'Gold', 'I', 'Electricity'] + """ + maxMatches = min(maxMatches, max_matches) + try: + return ParseResults( + [t for t, s, e in self.scan_string(instring, maxMatches, debug=debug)] + ) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def split( + self, + instring: str, + maxsplit: int = _MAX_INT, + include_separators: bool = False, + *, + includeSeparators=False, + ) -> Generator[str, None, None]: + """ + Generator method to split a string using the given expression as a separator. + May be called with optional ``maxsplit`` argument, to limit the number of splits; + and the optional ``include_separators`` argument (default= ``False``), if the separating + matching text should be included in the split results. + + Example:: + + punc = one_of(list(".,;:/-!?")) + print(list(punc.split("This, this?, this sentence, is badly punctuated!"))) + + prints:: + + ['This', ' this', '', ' this sentence', ' is badly punctuated', ''] + """ + includeSeparators = includeSeparators or include_separators + last = 0 + for t, s, e in self.scan_string(instring, max_matches=maxsplit): + yield instring[last:s] + if includeSeparators: + yield t[0] + last = e + yield instring[last:] + + def __add__(self, other) -> "ParserElement": + """ + Implementation of ``+`` operator - returns :class:`And`. 
Adding strings to a :class:`ParserElement` + converts them to :class:`Literal`s by default. + + Example:: + + greet = Word(alphas) + "," + Word(alphas) + "!" + hello = "Hello, World!" + print(hello, "->", greet.parse_string(hello)) + + prints:: + + Hello, World! -> ['Hello', ',', 'World', '!'] + + ``...`` may be used as a parse expression as a short form of :class:`SkipTo`. + + Literal('start') + ... + Literal('end') + + is equivalent to: + + Literal('start') + SkipTo('end')("_skipped*") + Literal('end') + + Note that the skipped text is returned with '_skipped' as a results name, + and to support having multiple skips in the same parser, the value returned is + a list of all skipped text. + """ + if other is Ellipsis: + return _PendingSkip(self) + + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return And([self, other]) + + def __radd__(self, other) -> "ParserElement": + """ + Implementation of ``+`` operator when left operand is not a :class:`ParserElement` + """ + if other is Ellipsis: + return SkipTo(self)("_skipped*") + self + + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return other + self + + def __sub__(self, other) -> "ParserElement": + """ + Implementation of ``-`` operator, returns :class:`And` with error stop + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return self + And._ErrorStop() + other + + def __rsub__(self, other) -> "ParserElement": + """ + Implementation of ``-`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return other - self + + def __mul__(self, other) -> "ParserElement": + """ + Implementation of ``*`` operator, allows use of ``expr * 3`` in place of + ``expr + expr + expr``. Expressions may also be multiplied by a 2-integer + tuple, similar to ``{min, max}`` multipliers in regular expressions. Tuples + may also include ``None`` as in: + - ``expr*(n, None)`` or ``expr*(n, )`` is equivalent + to ``expr*n + ZeroOrMore(expr)`` + (read as "at least n instances of ``expr``") + - ``expr*(None, n)`` is equivalent to ``expr*(0, n)`` + (read as "0 to n instances of ``expr``") + - ``expr*(None, None)`` is equivalent to ``ZeroOrMore(expr)`` + - ``expr*(1, None)`` is equivalent to ``OneOrMore(expr)`` + + Note that ``expr*(None, n)`` does not raise an exception if + more than n exprs exist in the input stream; that is, + ``expr*(None, n)`` does not enforce a maximum number of expr + occurrences. 
If this behavior is desired, then write + ``expr*(None, n) + ~expr`` + """ + if other is Ellipsis: + other = (0, None) + elif isinstance(other, tuple) and other[:1] == (Ellipsis,): + other = ((0,) + other[1:] + (None,))[:2] + + if isinstance(other, int): + minElements, optElements = other, 0 + elif isinstance(other, tuple): + other = tuple(o if o is not Ellipsis else None for o in other) + other = (other + (None, None))[:2] + if other[0] is None: + other = (0, other[1]) + if isinstance(other[0], int) and other[1] is None: + if other[0] == 0: + return ZeroOrMore(self) + if other[0] == 1: + return OneOrMore(self) + else: + return self * other[0] + ZeroOrMore(self) + elif isinstance(other[0], int) and isinstance(other[1], int): + minElements, optElements = other + optElements -= minElements + else: + raise TypeError( + "cannot multiply ParserElement and ({}) objects".format( + ",".join(type(item).__name__ for item in other) + ) + ) + else: + raise TypeError( + "cannot multiply ParserElement and {} objects".format( + type(other).__name__ + ) + ) + + if minElements < 0: + raise ValueError("cannot multiply ParserElement by negative value") + if optElements < 0: + raise ValueError( + "second tuple value must be greater or equal to first tuple value" + ) + if minElements == optElements == 0: + return And([]) + + if optElements: + + def makeOptionalList(n): + if n > 1: + return Opt(self + makeOptionalList(n - 1)) + else: + return Opt(self) + + if minElements: + if minElements == 1: + ret = self + makeOptionalList(optElements) + else: + ret = And([self] * minElements) + makeOptionalList(optElements) + else: + ret = makeOptionalList(optElements) + else: + if minElements == 1: + ret = self + else: + ret = And([self] * minElements) + return ret + + def __rmul__(self, other) -> "ParserElement": + return self.__mul__(other) + + def __or__(self, other) -> "ParserElement": + """ + Implementation of ``|`` operator - returns :class:`MatchFirst` + """ + if other is Ellipsis: + return _PendingSkip(self, must_skip=True) + + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return MatchFirst([self, other]) + + def __ror__(self, other) -> "ParserElement": + """ + Implementation of ``|`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return other | self + + def __xor__(self, other) -> "ParserElement": + """ + Implementation of ``^`` operator - returns :class:`Or` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return Or([self, other]) + + def __rxor__(self, other) -> "ParserElement": + """ + Implementation of ``^`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return other ^ self + + def __and__(self, other) -> "ParserElement": + """ + 
Implementation of ``&`` operator - returns :class:`Each` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return Each([self, other]) + + def __rand__(self, other) -> "ParserElement": + """ + Implementation of ``&`` operator when left operand is not a :class:`ParserElement` + """ + if isinstance(other, str_type): + other = self._literalStringClass(other) + if not isinstance(other, ParserElement): + raise TypeError( + "Cannot combine element of type {} with ParserElement".format( + type(other).__name__ + ) + ) + return other & self + + def __invert__(self) -> "ParserElement": + """ + Implementation of ``~`` operator - returns :class:`NotAny` + """ + return NotAny(self) + + # disable __iter__ to override legacy use of sequential access to __getitem__ to + # iterate over a sequence + __iter__ = None + + def __getitem__(self, key): + """ + use ``[]`` indexing notation as a short form for expression repetition: + + - ``expr[n]`` is equivalent to ``expr*n`` + - ``expr[m, n]`` is equivalent to ``expr*(m, n)`` + - ``expr[n, ...]`` or ``expr[n,]`` is equivalent + to ``expr*n + ZeroOrMore(expr)`` + (read as "at least n instances of ``expr``") + - ``expr[..., n]`` is equivalent to ``expr*(0, n)`` + (read as "0 to n instances of ``expr``") + - ``expr[...]`` and ``expr[0, ...]`` are equivalent to ``ZeroOrMore(expr)`` + - ``expr[1, ...]`` is equivalent to ``OneOrMore(expr)`` + + ``None`` may be used in place of ``...``. + + Note that ``expr[..., n]`` and ``expr[m, n]``do not raise an exception + if more than ``n`` ``expr``s exist in the input stream. If this behavior is + desired, then write ``expr[..., n] + ~expr``. + """ + + # convert single arg keys to tuples + try: + if isinstance(key, str_type): + key = (key,) + iter(key) + except TypeError: + key = (key, key) + + if len(key) > 2: + raise TypeError( + "only 1 or 2 index arguments supported ({}{})".format( + key[:5], "... [{}]".format(len(key)) if len(key) > 5 else "" + ) + ) + + # clip to 2 elements + ret = self * tuple(key[:2]) + return ret + + def __call__(self, name: str = None) -> "ParserElement": + """ + Shortcut for :class:`set_results_name`, with ``list_all_matches=False``. + + If ``name`` is given with a trailing ``'*'`` character, then ``list_all_matches`` will be + passed as ``True``. + + If ``name` is omitted, same as calling :class:`copy`. + + Example:: + + # these are equivalent + userdata = Word(alphas).set_results_name("name") + Word(nums + "-").set_results_name("socsecno") + userdata = Word(alphas)("name") + Word(nums + "-")("socsecno") + """ + if name is not None: + return self._setResultsName(name) + else: + return self.copy() + + def suppress(self) -> "ParserElement": + """ + Suppresses the output of this :class:`ParserElement`; useful to keep punctuation from + cluttering up returned output. + """ + return Suppress(self) + + def ignore_whitespace(self, recursive: bool = True) -> "ParserElement": + """ + Enables the skipping of whitespace before matching the characters in the + :class:`ParserElement`'s defined pattern. 
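+
+ A short illustrative sketch::
+
+     wd = Word(alphas).leave_whitespace()  # require matches with no leading whitespace
+     wd.ignore_whitespace()                # restore the default whitespace-skipping behavior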
+ + :param recursive: If ``True`` (the default), also enable whitespace skipping in child elements (if any) + """ + self.skipWhitespace = True + return self + + def leave_whitespace(self, recursive: bool = True) -> "ParserElement": + """ + Disables the skipping of whitespace before matching the characters in the + :class:`ParserElement`'s defined pattern. This is normally only used internally by + the pyparsing module, but may be needed in some whitespace-sensitive grammars. + + :param recursive: If true (the default), also disable whitespace skipping in child elements (if any) + """ + self.skipWhitespace = False + return self + + def set_whitespace_chars( + self, chars: Union[Set[str], str], copy_defaults: bool = False + ) -> "ParserElement": + """ + Overrides the default whitespace chars + """ + self.skipWhitespace = True + self.whiteChars = set(chars) + self.copyDefaultWhiteChars = copy_defaults + return self + + def parse_with_tabs(self) -> "ParserElement": + """ + Overrides default behavior to expand ```` s to spaces before parsing the input string. + Must be called before ``parse_string`` when the input grammar contains elements that + match ```` characters. + """ + self.keepTabs = True + return self + + def ignore(self, other: "ParserElement") -> "ParserElement": + """ + Define expression to be ignored (e.g., comments) while doing pattern + matching; may be called repeatedly, to define multiple comment or other + ignorable patterns. + + Example:: + + patt = Word(alphas)[1, ...] + patt.parse_string('ablaj /* comment */ lskjd') + # -> ['ablaj'] + + patt.ignore(c_style_comment) + patt.parse_string('ablaj /* comment */ lskjd') + # -> ['ablaj', 'lskjd'] + """ + import typing + + if isinstance(other, str_type): + other = Suppress(other) + + if isinstance(other, Suppress): + if other not in self.ignoreExprs: + self.ignoreExprs.append(other) + else: + self.ignoreExprs.append(Suppress(other.copy())) + return self + + def set_debug_actions( + self, + start_action: DebugStartAction, + success_action: DebugSuccessAction, + exception_action: DebugExceptionAction, + ) -> "ParserElement": + """ + Customize display of debugging messages while doing pattern matching: + + - ``start_action`` - method to be called when an expression is about to be parsed; + should have the signature ``fn(input_string: str, location: int, expression: ParserElement, cache_hit: bool)`` + + - ``success_action`` - method to be called when an expression has successfully parsed; + should have the signature ``fn(input_string: str, start_location: int, end_location: int, expression: ParserELement, parsed_tokens: ParseResults, cache_hit: bool)`` + + - ``exception_action`` - method to be called when expression fails to parse; + should have the signature ``fn(input_string: str, location: int, expression: ParserElement, exception: Exception, cache_hit: bool)`` + """ + self.debugActions = self.DebugActions( + start_action or _default_start_debug_action, + success_action or _default_success_debug_action, + exception_action or _default_exception_debug_action, + ) + self.debug = True + return self + + def set_debug(self, flag: bool = True) -> "ParserElement": + """ + Enable display of debugging messages while doing pattern matching. + Set ``flag`` to ``True`` to enable, ``False`` to disable. 
+ + Example:: + + wd = Word(alphas).set_name("alphaword") + integer = Word(nums).set_name("numword") + term = wd | integer + + # turn on debugging for wd + wd.set_debug() + + term[1, ...].parse_string("abc 123 xyz 890") + + prints:: + + Match alphaword at loc 0(1,1) + Matched alphaword -> ['abc'] + Match alphaword at loc 3(1,4) + Exception raised:Expected alphaword (at char 4), (line:1, col:5) + Match alphaword at loc 7(1,8) + Matched alphaword -> ['xyz'] + Match alphaword at loc 11(1,12) + Exception raised:Expected alphaword (at char 12), (line:1, col:13) + Match alphaword at loc 15(1,16) + Exception raised:Expected alphaword (at char 15), (line:1, col:16) + + The output shown is that produced by the default debug actions - custom debug actions can be + specified using :class:`set_debug_actions`. Prior to attempting + to match the ``wd`` expression, the debugging message ``"Match at loc (,)"`` + is shown. Then if the parse succeeds, a ``"Matched"`` message is shown, or an ``"Exception raised"`` + message is shown. Also note the use of :class:`set_name` to assign a human-readable name to the expression, + which makes debugging and exception messages easier to understand - for instance, the default + name created for the :class:`Word` expression without calling ``set_name`` is ``"W:(A-Za-z)"``. + """ + if flag: + self.set_debug_actions( + _default_start_debug_action, + _default_success_debug_action, + _default_exception_debug_action, + ) + else: + self.debug = False + return self + + @property + def default_name(self) -> str: + if self._defaultName is None: + self._defaultName = self._generateDefaultName() + return self._defaultName + + @abstractmethod + def _generateDefaultName(self): + """ + Child classes must define this method, which defines how the ``default_name`` is set. + """ + + def set_name(self, name: str) -> "ParserElement": + """ + Define name for this expression, makes debugging and exception messages clearer. + Example:: + Word(nums).parse_string("ABC") # -> Exception: Expected W:(0-9) (at char 0), (line:1, col:1) + Word(nums).set_name("integer").parse_string("ABC") # -> Exception: Expected integer (at char 0), (line:1, col:1) + """ + self.customName = name + self.errmsg = "Expected " + self.name + if __diag__.enable_debug_on_named_expressions: + self.set_debug() + return self + + @property + def name(self) -> str: + # This will use a user-defined name if available, but otherwise defaults back to the auto-generated name + return self.customName if self.customName is not None else self.default_name + + def __str__(self) -> str: + return self.name + + def __repr__(self) -> str: + return str(self) + + def streamline(self) -> "ParserElement": + self.streamlined = True + self._defaultName = None + return self + + def recurse(self) -> Sequence["ParserElement"]: + return [] + + def _checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] + for e in self.recurse(): + e._checkRecursion(subRecCheckList) + + def validate(self, validateTrace=None) -> None: + """ + Check defined expressions for valid structure, check for infinite recursive definitions. + """ + self._checkRecursion([]) + + def parse_file( + self, + file_or_filename: Union[str, Path, TextIO], + encoding: str = "utf-8", + parse_all: bool = False, + *, + parseAll: bool = False, + ) -> ParseResults: + """ + Execute the parse expression on the given file or filename. + If a filename is specified (instead of a file object), + the entire file is opened, read, and closed before parsing. 
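+
+ Example (a minimal sketch; ``settings.cfg`` is a hypothetical file name)::
+
+     key_value = Word(alphas)("key") + "=" + Word(alphanums)("value")
+     # reads the file and parses the first key=value pair at the start of its contents
+     result = key_value.parse_file("settings.cfg")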
+ """ + parseAll = parseAll or parse_all + try: + file_contents = file_or_filename.read() + except AttributeError: + with open(file_or_filename, "r", encoding=encoding) as f: + file_contents = f.read() + try: + return self.parse_string(file_contents, parseAll) + except ParseBaseException as exc: + if ParserElement.verbose_stacktrace: + raise + else: + # catch and re-raise exception from here, clears out pyparsing internal stack trace + raise exc.with_traceback(None) + + def __eq__(self, other): + if self is other: + return True + elif isinstance(other, str_type): + return self.matches(other, parse_all=True) + elif isinstance(other, ParserElement): + return vars(self) == vars(other) + return False + + def __hash__(self): + return id(self) + + def matches( + self, test_string: str, parse_all: bool = True, *, parseAll: bool = True + ) -> bool: + """ + Method for quick testing of a parser against a test string. Good for simple + inline microtests of sub expressions while building up larger parser. + + Parameters: + - ``test_string`` - to test against this expression for a match + - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests + + Example:: + + expr = Word(nums) + assert expr.matches("100") + """ + parseAll = parseAll and parse_all + try: + self.parse_string(str(test_string), parse_all=parseAll) + return True + except ParseBaseException: + return False + + def run_tests( + self, + tests: Union[str, List[str]], + parse_all: bool = True, + comment: typing.Optional[Union["ParserElement", str]] = "#", + full_dump: bool = True, + print_results: bool = True, + failure_tests: bool = False, + post_parse: Callable[[str, ParseResults], str] = None, + file: typing.Optional[TextIO] = None, + with_line_numbers: bool = False, + *, + parseAll: bool = True, + fullDump: bool = True, + printResults: bool = True, + failureTests: bool = False, + postParse: Callable[[str, ParseResults], str] = None, + ) -> Tuple[bool, List[Tuple[str, Union[ParseResults, Exception]]]]: + """ + Execute the parse expression on a series of test strings, showing each + test, the parsed results or where the parse failed. Quick and easy way to + run a parse expression against a list of sample strings. 
+ + Parameters: + - ``tests`` - a list of separate test strings, or a multiline string of test strings + - ``parse_all`` - (default= ``True``) - flag to pass to :class:`parse_string` when running tests + - ``comment`` - (default= ``'#'``) - expression for indicating embedded comments in the test + string; pass None to disable comment filtering + - ``full_dump`` - (default= ``True``) - dump results as list followed by results names in nested outline; + if False, only dump nested list + - ``print_results`` - (default= ``True``) prints test output to stdout + - ``failure_tests`` - (default= ``False``) indicates if these tests are expected to fail parsing + - ``post_parse`` - (default= ``None``) optional callback for successful parse results; called as + `fn(test_string, parse_results)` and returns a string to be added to the test output + - ``file`` - (default= ``None``) optional file-like object to which test output will be written; + if None, will default to ``sys.stdout`` + - ``with_line_numbers`` - default= ``False``) show test strings with line and column numbers + + Returns: a (success, results) tuple, where success indicates that all tests succeeded + (or failed if ``failure_tests`` is True), and the results contain a list of lines of each + test's output + + Example:: + + number_expr = pyparsing_common.number.copy() + + result = number_expr.run_tests(''' + # unsigned integer + 100 + # negative integer + -100 + # float with scientific notation + 6.02e23 + # integer with scientific notation + 1e-12 + ''') + print("Success" if result[0] else "Failed!") + + result = number_expr.run_tests(''' + # stray character + 100Z + # missing leading digit before '.' + -.100 + # too many '.' + 3.14.159 + ''', failure_tests=True) + print("Success" if result[0] else "Failed!") + + prints:: + + # unsigned integer + 100 + [100] + + # negative integer + -100 + [-100] + + # float with scientific notation + 6.02e23 + [6.02e+23] + + # integer with scientific notation + 1e-12 + [1e-12] + + Success + + # stray character + 100Z + ^ + FAIL: Expected end of text (at char 3), (line:1, col:4) + + # missing leading digit before '.' + -.100 + ^ + FAIL: Expected {real number with scientific notation | real number | signed integer} (at char 0), (line:1, col:1) + + # too many '.' + 3.14.159 + ^ + FAIL: Expected end of text (at char 4), (line:1, col:5) + + Success + + Each test string must be on a single line. If you want to test a string that spans multiple + lines, create a test like this:: + + expr.run_tests(r"this is a test\\n of strings that spans \\n 3 lines") + + (Note that this is a raw string literal, you must include the leading ``'r'``.) 
+ """ + from .testing import pyparsing_test + + parseAll = parseAll and parse_all + fullDump = fullDump and full_dump + printResults = printResults and print_results + failureTests = failureTests or failure_tests + postParse = postParse or post_parse + if isinstance(tests, str_type): + line_strip = type(tests).strip + tests = [line_strip(test_line) for test_line in tests.rstrip().splitlines()] + if isinstance(comment, str_type): + comment = Literal(comment) + if file is None: + file = sys.stdout + print_ = file.write + + result: Union[ParseResults, Exception] + allResults = [] + comments = [] + success = True + NL = Literal(r"\n").add_parse_action(replace_with("\n")).ignore(quoted_string) + BOM = "\ufeff" + for t in tests: + if comment is not None and comment.matches(t, False) or comments and not t: + comments.append( + pyparsing_test.with_line_numbers(t) if with_line_numbers else t + ) + continue + if not t: + continue + out = [ + "\n" + "\n".join(comments) if comments else "", + pyparsing_test.with_line_numbers(t) if with_line_numbers else t, + ] + comments = [] + try: + # convert newline marks to actual newlines, and strip leading BOM if present + t = NL.transform_string(t.lstrip(BOM)) + result = self.parse_string(t, parse_all=parseAll) + except ParseBaseException as pe: + fatal = "(FATAL)" if isinstance(pe, ParseFatalException) else "" + out.append(pe.explain()) + out.append("FAIL: " + str(pe)) + if ParserElement.verbose_stacktrace: + out.extend(traceback.format_tb(pe.__traceback__)) + success = success and failureTests + result = pe + except Exception as exc: + out.append("FAIL-EXCEPTION: {}: {}".format(type(exc).__name__, exc)) + if ParserElement.verbose_stacktrace: + out.extend(traceback.format_tb(exc.__traceback__)) + success = success and failureTests + result = exc + else: + success = success and not failureTests + if postParse is not None: + try: + pp_value = postParse(t, result) + if pp_value is not None: + if isinstance(pp_value, ParseResults): + out.append(pp_value.dump()) + else: + out.append(str(pp_value)) + else: + out.append(result.dump()) + except Exception as e: + out.append(result.dump(full=fullDump)) + out.append( + "{} failed: {}: {}".format( + postParse.__name__, type(e).__name__, e + ) + ) + else: + out.append(result.dump(full=fullDump)) + out.append("") + + if printResults: + print_("\n".join(out)) + + allResults.append((t, result)) + + return success, allResults + + def create_diagram( + self, + output_html: Union[TextIO, Path, str], + vertical: int = 3, + show_results_names: bool = False, + show_groups: bool = False, + **kwargs, + ) -> None: + """ + Create a railroad diagram for the parser. + + Parameters: + - output_html (str or file-like object) - output target for generated + diagram HTML + - vertical (int) - threshold for formatting multiple alternatives vertically + instead of horizontally (default=3) + - show_results_names - bool flag whether diagram should show annotations for + defined results names + - show_groups - bool flag whether groups should be highlighted with an unlabeled surrounding box + Additional diagram-formatting keyword arguments can also be included; + see railroad.Diagram class. 
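+
+ Example (illustrative; requires ``pip install pyparsing[diagrams]``)::
+
+     wd = Word(alphas).set_name("word")
+     greet = wd("greeting") + "," + wd("name") + "!"
+     # writes an HTML railroad diagram for this parser to the given file
+     greet.create_diagram("greeting_diagram.html", show_results_names=True)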
+ """ + + try: + from .diagram import to_railroad, railroad_to_html + except ImportError as ie: + raise Exception( + "must ``pip install pyparsing[diagrams]`` to generate parser railroad diagrams" + ) from ie + + self.streamline() + + railroad = to_railroad( + self, + vertical=vertical, + show_results_names=show_results_names, + show_groups=show_groups, + diagram_kwargs=kwargs, + ) + if isinstance(output_html, (str, Path)): + with open(output_html, "w", encoding="utf-8") as diag_file: + diag_file.write(railroad_to_html(railroad)) + else: + # we were passed a file-like object, just write to it + output_html.write(railroad_to_html(railroad)) + + setDefaultWhitespaceChars = set_default_whitespace_chars + inlineLiteralsUsing = inline_literals_using + setResultsName = set_results_name + setBreak = set_break + setParseAction = set_parse_action + addParseAction = add_parse_action + addCondition = add_condition + setFailAction = set_fail_action + tryParse = try_parse + canParseNext = can_parse_next + resetCache = reset_cache + enableLeftRecursion = enable_left_recursion + enablePackrat = enable_packrat + parseString = parse_string + scanString = scan_string + searchString = search_string + transformString = transform_string + setWhitespaceChars = set_whitespace_chars + parseWithTabs = parse_with_tabs + setDebugActions = set_debug_actions + setDebug = set_debug + defaultName = default_name + setName = set_name + parseFile = parse_file + runTests = run_tests + ignoreWhitespace = ignore_whitespace + leaveWhitespace = leave_whitespace + + +class _PendingSkip(ParserElement): + # internal placeholder class to hold a place were '...' is added to a parser element, + # once another ParserElement is added, this placeholder will be replaced with a SkipTo + def __init__(self, expr: ParserElement, must_skip: bool = False): + super().__init__() + self.anchor = expr + self.must_skip = must_skip + + def _generateDefaultName(self): + return str(self.anchor + Empty()).replace("Empty", "...") + + def __add__(self, other) -> "ParserElement": + skipper = SkipTo(other).set_name("...")("_skipped*") + if self.must_skip: + + def must_skip(t): + if not t._skipped or t._skipped.as_list() == [""]: + del t[0] + t.pop("_skipped", None) + + def show_skip(t): + if t._skipped.as_list()[-1:] == [""]: + t.pop("_skipped") + t["_skipped"] = "missing <" + repr(self.anchor) + ">" + + return ( + self.anchor + skipper().add_parse_action(must_skip) + | skipper().add_parse_action(show_skip) + ) + other + + return self.anchor + skipper + other + + def __repr__(self): + return self.defaultName + + def parseImpl(self, *args): + raise Exception( + "use of `...` expression without following SkipTo target expression" + ) + + +class Token(ParserElement): + """Abstract :class:`ParserElement` subclass, for defining atomic + matching patterns. + """ + + def __init__(self): + super().__init__(savelist=False) + + def _generateDefaultName(self): + return type(self).__name__ + + +class Empty(Token): + """ + An empty token, will always match. + """ + + def __init__(self): + super().__init__() + self.mayReturnEmpty = True + self.mayIndexError = False + + +class NoMatch(Token): + """ + A token that will never match. + """ + + def __init__(self): + super().__init__() + self.mayReturnEmpty = True + self.mayIndexError = False + self.errmsg = "Unmatchable token" + + def parseImpl(self, instring, loc, doActions=True): + raise ParseException(instring, loc, self.errmsg, self) + + +class Literal(Token): + """ + Token to exactly match a specified string. 
+ + Example:: + + Literal('blah').parse_string('blah') # -> ['blah'] + Literal('blah').parse_string('blahfooblah') # -> ['blah'] + Literal('blah').parse_string('bla') # -> Exception: Expected "blah" + + For case-insensitive matching, use :class:`CaselessLiteral`. + + For keyword matching (force word break before and after the matched string), + use :class:`Keyword` or :class:`CaselessKeyword`. + """ + + def __init__(self, match_string: str = "", *, matchString: str = ""): + super().__init__() + match_string = matchString or match_string + self.match = match_string + self.matchLen = len(match_string) + try: + self.firstMatchChar = match_string[0] + except IndexError: + raise ValueError("null string passed to Literal; use Empty() instead") + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = False + self.mayIndexError = False + + # Performance tuning: modify __class__ to select + # a parseImpl optimized for single-character check + if self.matchLen == 1 and type(self) is Literal: + self.__class__ = _SingleCharLiteral + + def _generateDefaultName(self): + return repr(self.match) + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] == self.firstMatchChar and instring.startswith( + self.match, loc + ): + return loc + self.matchLen, self.match + raise ParseException(instring, loc, self.errmsg, self) + + +class _SingleCharLiteral(Literal): + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] == self.firstMatchChar: + return loc + 1, self.match + raise ParseException(instring, loc, self.errmsg, self) + + +ParserElement._literalStringClass = Literal + + +class Keyword(Token): + """ + Token to exactly match a specified string as a keyword, that is, + it must be immediately followed by a non-keyword character. Compare + with :class:`Literal`: + + - ``Literal("if")`` will match the leading ``'if'`` in + ``'ifAndOnlyIf'``. + - ``Keyword("if")`` will not; it will only match the leading + ``'if'`` in ``'if x=1'``, or ``'if(y==2)'`` + + Accepts two optional constructor arguments in addition to the + keyword string: + + - ``identChars`` is a string of characters that would be valid + identifier characters, defaulting to all alphanumerics + "_" and + "$" + - ``caseless`` allows case-insensitive matching, default is ``False``. + + Example:: + + Keyword("start").parse_string("start") # -> ['start'] + Keyword("start").parse_string("starting") # -> Exception + + For case-insensitive matching, use :class:`CaselessKeyword`. 
+ """ + + DEFAULT_KEYWORD_CHARS = alphanums + "_$" + + def __init__( + self, + match_string: str = "", + ident_chars: typing.Optional[str] = None, + caseless: bool = False, + *, + matchString: str = "", + identChars: typing.Optional[str] = None, + ): + super().__init__() + identChars = identChars or ident_chars + if identChars is None: + identChars = Keyword.DEFAULT_KEYWORD_CHARS + match_string = matchString or match_string + self.match = match_string + self.matchLen = len(match_string) + try: + self.firstMatchChar = match_string[0] + except IndexError: + raise ValueError("null string passed to Keyword; use Empty() instead") + self.errmsg = "Expected {} {}".format(type(self).__name__, self.name) + self.mayReturnEmpty = False + self.mayIndexError = False + self.caseless = caseless + if caseless: + self.caselessmatch = match_string.upper() + identChars = identChars.upper() + self.identChars = set(identChars) + + def _generateDefaultName(self): + return repr(self.match) + + def parseImpl(self, instring, loc, doActions=True): + errmsg = self.errmsg + errloc = loc + if self.caseless: + if instring[loc : loc + self.matchLen].upper() == self.caselessmatch: + if loc == 0 or instring[loc - 1].upper() not in self.identChars: + if ( + loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen].upper() not in self.identChars + ): + return loc + self.matchLen, self.match + else: + # followed by keyword char + errmsg += ", was immediately followed by keyword character" + errloc = loc + self.matchLen + else: + # preceded by keyword char + errmsg += ", keyword was immediately preceded by keyword character" + errloc = loc - 1 + # else no match just raise plain exception + + else: + if ( + instring[loc] == self.firstMatchChar + and self.matchLen == 1 + or instring.startswith(self.match, loc) + ): + if loc == 0 or instring[loc - 1] not in self.identChars: + if ( + loc >= len(instring) - self.matchLen + or instring[loc + self.matchLen] not in self.identChars + ): + return loc + self.matchLen, self.match + else: + # followed by keyword char + errmsg += ( + ", keyword was immediately followed by keyword character" + ) + errloc = loc + self.matchLen + else: + # preceded by keyword char + errmsg += ", keyword was immediately preceded by keyword character" + errloc = loc - 1 + # else no match just raise plain exception + + raise ParseException(instring, errloc, errmsg, self) + + @staticmethod + def set_default_keyword_chars(chars) -> None: + """ + Overrides the default characters used by :class:`Keyword` expressions. + """ + Keyword.DEFAULT_KEYWORD_CHARS = chars + + setDefaultKeywordChars = set_default_keyword_chars + + +class CaselessLiteral(Literal): + """ + Token to match a specified string, ignoring case of letters. + Note: the matched results will always be in the case of the given + match string, NOT the case of the input text. + + Example:: + + CaselessLiteral("CMD")[1, ...].parse_string("cmd CMD Cmd10") + # -> ['CMD', 'CMD', 'CMD'] + + (Contrast with example for :class:`CaselessKeyword`.) + """ + + def __init__(self, match_string: str = "", *, matchString: str = ""): + match_string = matchString or match_string + super().__init__(match_string.upper()) + # Preserve the defining literal. 
+ self.returnString = match_string + self.errmsg = "Expected " + self.name + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc : loc + self.matchLen].upper() == self.match: + return loc + self.matchLen, self.returnString + raise ParseException(instring, loc, self.errmsg, self) + + +class CaselessKeyword(Keyword): + """ + Caseless version of :class:`Keyword`. + + Example:: + + CaselessKeyword("CMD")[1, ...].parse_string("cmd CMD Cmd10") + # -> ['CMD', 'CMD'] + + (Contrast with example for :class:`CaselessLiteral`.) + """ + + def __init__( + self, + match_string: str = "", + ident_chars: typing.Optional[str] = None, + *, + matchString: str = "", + identChars: typing.Optional[str] = None, + ): + identChars = identChars or ident_chars + match_string = matchString or match_string + super().__init__(match_string, identChars, caseless=True) + + +class CloseMatch(Token): + """A variation on :class:`Literal` which matches "close" matches, + that is, strings with at most 'n' mismatching characters. + :class:`CloseMatch` takes parameters: + + - ``match_string`` - string to be matched + - ``caseless`` - a boolean indicating whether to ignore casing when comparing characters + - ``max_mismatches`` - (``default=1``) maximum number of + mismatches allowed to count as a match + + The results from a successful parse will contain the matched text + from the input string and the following named results: + + - ``mismatches`` - a list of the positions within the + match_string where mismatches were found + - ``original`` - the original match_string used to compare + against the input string + + If ``mismatches`` is an empty list, then the match was an exact + match. + + Example:: + + patt = CloseMatch("ATCATCGAATGGA") + patt.parse_string("ATCATCGAAXGGA") # -> (['ATCATCGAAXGGA'], {'mismatches': [[9]], 'original': ['ATCATCGAATGGA']}) + patt.parse_string("ATCAXCGAAXGGA") # -> Exception: Expected 'ATCATCGAATGGA' (with up to 1 mismatches) (at char 0), (line:1, col:1) + + # exact match + patt.parse_string("ATCATCGAATGGA") # -> (['ATCATCGAATGGA'], {'mismatches': [[]], 'original': ['ATCATCGAATGGA']}) + + # close match allowing up to 2 mismatches + patt = CloseMatch("ATCATCGAATGGA", max_mismatches=2) + patt.parse_string("ATCAXCGAAXGGA") # -> (['ATCAXCGAAXGGA'], {'mismatches': [[4, 9]], 'original': ['ATCATCGAATGGA']}) + """ + + def __init__( + self, + match_string: str, + max_mismatches: int = None, + *, + maxMismatches: int = 1, + caseless=False, + ): + maxMismatches = max_mismatches if max_mismatches is not None else maxMismatches + super().__init__() + self.match_string = match_string + self.maxMismatches = maxMismatches + self.errmsg = "Expected {!r} (with up to {} mismatches)".format( + self.match_string, self.maxMismatches + ) + self.caseless = caseless + self.mayIndexError = False + self.mayReturnEmpty = False + + def _generateDefaultName(self): + return "{}:{!r}".format(type(self).__name__, self.match_string) + + def parseImpl(self, instring, loc, doActions=True): + start = loc + instrlen = len(instring) + maxloc = start + len(self.match_string) + + if maxloc <= instrlen: + match_string = self.match_string + match_stringloc = 0 + mismatches = [] + maxMismatches = self.maxMismatches + + for match_stringloc, s_m in enumerate( + zip(instring[loc:maxloc], match_string) + ): + src, mat = s_m + if self.caseless: + src, mat = src.lower(), mat.lower() + + if src != mat: + mismatches.append(match_stringloc) + if len(mismatches) > maxMismatches: + break + else: + loc = start + match_stringloc + 1 + 
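+                 # for-else: the loop finished without exceeding maxMismatches,
+                 # so build the result from the matched slice, recording the
+                 # original match string and the positions of any mismatches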
results = ParseResults([instring[start:loc]]) + results["original"] = match_string + results["mismatches"] = mismatches + return loc, results + + raise ParseException(instring, loc, self.errmsg, self) + + +class Word(Token): + """Token for matching words composed of allowed character sets. + Parameters: + - ``init_chars`` - string of all characters that should be used to + match as a word; "ABC" will match "AAA", "ABAB", "CBAC", etc.; + if ``body_chars`` is also specified, then this is the string of + initial characters + - ``body_chars`` - string of characters that + can be used for matching after a matched initial character as + given in ``init_chars``; if omitted, same as the initial characters + (default=``None``) + - ``min`` - minimum number of characters to match (default=1) + - ``max`` - maximum number of characters to match (default=0) + - ``exact`` - exact number of characters to match (default=0) + - ``as_keyword`` - match as a keyword (default=``False``) + - ``exclude_chars`` - characters that might be + found in the input ``body_chars`` string but which should not be + accepted for matching ;useful to define a word of all + printables except for one or two characters, for instance + (default=``None``) + + :class:`srange` is useful for defining custom character set strings + for defining :class:`Word` expressions, using range notation from + regular expression character sets. + + A common mistake is to use :class:`Word` to match a specific literal + string, as in ``Word("Address")``. Remember that :class:`Word` + uses the string argument to define *sets* of matchable characters. + This expression would match "Add", "AAA", "dAred", or any other word + made up of the characters 'A', 'd', 'r', 'e', and 's'. To match an + exact literal string, use :class:`Literal` or :class:`Keyword`. + + pyparsing includes helper strings for building Words: + + - :class:`alphas` + - :class:`nums` + - :class:`alphanums` + - :class:`hexnums` + - :class:`alphas8bit` (alphabetic characters in ASCII range 128-255 + - accented, tilded, umlauted, etc.) + - :class:`punc8bit` (non-alphabetic characters in ASCII range + 128-255 - currency, symbols, superscripts, diacriticals, etc.) + - :class:`printables` (any non-whitespace character) + + ``alphas``, ``nums``, and ``printables`` are also defined in several + Unicode sets - see :class:`pyparsing_unicode``. 
+ + Example:: + + # a word composed of digits + integer = Word(nums) # equivalent to Word("0123456789") or Word(srange("0-9")) + + # a word with a leading capital, and zero or more lowercase + capital_word = Word(alphas.upper(), alphas.lower()) + + # hostnames are alphanumeric, with leading alpha, and '-' + hostname = Word(alphas, alphanums + '-') + + # roman numeral (not a strict parser, accepts invalid mix of characters) + roman = Word("IVXLCDM") + + # any string of non-whitespace characters, except for ',' + csv_value = Word(printables, exclude_chars=",") + """ + + def __init__( + self, + init_chars: str = "", + body_chars: typing.Optional[str] = None, + min: int = 1, + max: int = 0, + exact: int = 0, + as_keyword: bool = False, + exclude_chars: typing.Optional[str] = None, + *, + initChars: typing.Optional[str] = None, + bodyChars: typing.Optional[str] = None, + asKeyword: bool = False, + excludeChars: typing.Optional[str] = None, + ): + initChars = initChars or init_chars + bodyChars = bodyChars or body_chars + asKeyword = asKeyword or as_keyword + excludeChars = excludeChars or exclude_chars + super().__init__() + if not initChars: + raise ValueError( + "invalid {}, initChars cannot be empty string".format( + type(self).__name__ + ) + ) + + initChars = set(initChars) + self.initChars = initChars + if excludeChars: + excludeChars = set(excludeChars) + initChars -= excludeChars + if bodyChars: + bodyChars = set(bodyChars) - excludeChars + self.initCharsOrig = "".join(sorted(initChars)) + + if bodyChars: + self.bodyCharsOrig = "".join(sorted(bodyChars)) + self.bodyChars = set(bodyChars) + else: + self.bodyCharsOrig = "".join(sorted(initChars)) + self.bodyChars = set(initChars) + + self.maxSpecified = max > 0 + + if min < 1: + raise ValueError( + "cannot specify a minimum length < 1; use Opt(Word()) if zero-length word is permitted" + ) + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.asKeyword = asKeyword + + # see if we can make a regex for this Word + if " " not in self.initChars | self.bodyChars and (min == 1 and exact == 0): + if self.bodyChars == self.initChars: + if max == 0: + repeat = "+" + elif max == 1: + repeat = "" + else: + repeat = "{{{},{}}}".format( + self.minLen, "" if self.maxLen == _MAX_INT else self.maxLen + ) + self.reString = "[{}]{}".format( + _collapse_string_to_ranges(self.initChars), + repeat, + ) + elif len(self.initChars) == 1: + if max == 0: + repeat = "*" + else: + repeat = "{{0,{}}}".format(max - 1) + self.reString = "{}[{}]{}".format( + re.escape(self.initCharsOrig), + _collapse_string_to_ranges(self.bodyChars), + repeat, + ) + else: + if max == 0: + repeat = "*" + elif max == 2: + repeat = "" + else: + repeat = "{{0,{}}}".format(max - 1) + self.reString = "[{}][{}]{}".format( + _collapse_string_to_ranges(self.initChars), + _collapse_string_to_ranges(self.bodyChars), + repeat, + ) + if self.asKeyword: + self.reString = r"\b" + self.reString + r"\b" + + try: + self.re = re.compile(self.reString) + except re.error: + self.re = None + else: + self.re_match = self.re.match + self.__class__ = _WordRegex + + def _generateDefaultName(self): + def charsAsStr(s): + max_repr_len = 16 + s = _collapse_string_to_ranges(s, re_escape=False) + if len(s) > max_repr_len: + return s[: max_repr_len - 3] + "..." 
+ else: + return s + + if self.initChars != self.bodyChars: + base = "W:({}, {})".format( + charsAsStr(self.initChars), charsAsStr(self.bodyChars) + ) + else: + base = "W:({})".format(charsAsStr(self.initChars)) + + # add length specification + if self.minLen > 1 or self.maxLen != _MAX_INT: + if self.minLen == self.maxLen: + if self.minLen == 1: + return base[2:] + else: + return base + "{{{}}}".format(self.minLen) + elif self.maxLen == _MAX_INT: + return base + "{{{},...}}".format(self.minLen) + else: + return base + "{{{},{}}}".format(self.minLen, self.maxLen) + return base + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] not in self.initChars: + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + instrlen = len(instring) + bodychars = self.bodyChars + maxloc = start + self.maxLen + maxloc = min(maxloc, instrlen) + while loc < maxloc and instring[loc] in bodychars: + loc += 1 + + throwException = False + if loc - start < self.minLen: + throwException = True + elif self.maxSpecified and loc < instrlen and instring[loc] in bodychars: + throwException = True + elif self.asKeyword: + if ( + start > 0 + and instring[start - 1] in bodychars + or loc < instrlen + and instring[loc] in bodychars + ): + throwException = True + + if throwException: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class _WordRegex(Word): + def parseImpl(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + return loc, result.group() + + +class Char(_WordRegex): + """A short-cut class for defining :class:`Word` ``(characters, exact=1)``, + when defining a match of any single character in a string of + characters. + """ + + def __init__( + self, + charset: str, + as_keyword: bool = False, + exclude_chars: typing.Optional[str] = None, + *, + asKeyword: bool = False, + excludeChars: typing.Optional[str] = None, + ): + asKeyword = asKeyword or as_keyword + excludeChars = excludeChars or exclude_chars + super().__init__( + charset, exact=1, asKeyword=asKeyword, excludeChars=excludeChars + ) + self.reString = "[{}]".format(_collapse_string_to_ranges(self.initChars)) + if asKeyword: + self.reString = r"\b{}\b".format(self.reString) + self.re = re.compile(self.reString) + self.re_match = self.re.match + + +class Regex(Token): + r"""Token for matching strings that match a given regular + expression. Defined with string specifying the regular expression in + a form recognized by the stdlib Python `re module `_. + If the given regex contains named groups (defined using ``(?P...)``), + these will be preserved as named :class:`ParseResults`. + + If instead of the Python stdlib ``re`` module you wish to use a different RE module + (such as the ``regex`` module), you can do so by building your ``Regex`` object with + a compiled RE that was compiled using ``regex``. 
+ + Example:: + + realnum = Regex(r"[+-]?\d+\.\d*") + # ref: https://stackoverflow.com/questions/267399/how-do-you-match-only-valid-roman-numerals-with-a-regular-expression + roman = Regex(r"M{0,4}(CM|CD|D?{0,3})(XC|XL|L?X{0,3})(IX|IV|V?I{0,3})") + + # named fields in a regex will be returned as named results + date = Regex(r'(?P\d{4})-(?P\d\d?)-(?P\d\d?)') + + # the Regex class will accept re's compiled using the regex module + import regex + parser = pp.Regex(regex.compile(r'[0-9]')) + """ + + def __init__( + self, + pattern: Any, + flags: Union[re.RegexFlag, int] = 0, + as_group_list: bool = False, + as_match: bool = False, + *, + asGroupList: bool = False, + asMatch: bool = False, + ): + """The parameters ``pattern`` and ``flags`` are passed + to the ``re.compile()`` function as-is. See the Python + `re module `_ module for an + explanation of the acceptable patterns and flags. + """ + super().__init__() + asGroupList = asGroupList or as_group_list + asMatch = asMatch or as_match + + if isinstance(pattern, str_type): + if not pattern: + raise ValueError("null string passed to Regex; use Empty() instead") + + self._re = None + self.reString = self.pattern = pattern + self.flags = flags + + elif hasattr(pattern, "pattern") and hasattr(pattern, "match"): + self._re = pattern + self.pattern = self.reString = pattern.pattern + self.flags = flags + + else: + raise TypeError( + "Regex may only be constructed with a string or a compiled RE object" + ) + + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.asGroupList = asGroupList + self.asMatch = asMatch + if self.asGroupList: + self.parseImpl = self.parseImplAsGroupList + if self.asMatch: + self.parseImpl = self.parseImplAsMatch + + @cached_property + def re(self): + if self._re: + return self._re + else: + try: + return re.compile(self.pattern, self.flags) + except re.error: + raise ValueError( + "invalid pattern ({!r}) passed to Regex".format(self.pattern) + ) + + @cached_property + def re_match(self): + return self.re.match + + @cached_property + def mayReturnEmpty(self): + return self.re_match("") is not None + + def _generateDefaultName(self): + return "Re:({})".format(repr(self.pattern).replace("\\\\", "\\")) + + def parseImpl(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = ParseResults(result.group()) + d = result.groupdict() + if d: + for k, v in d.items(): + ret[k] = v + return loc, ret + + def parseImplAsGroupList(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result.groups() + return loc, ret + + def parseImplAsMatch(self, instring, loc, doActions=True): + result = self.re_match(instring, loc) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result + return loc, ret + + def sub(self, repl: str) -> ParserElement: + r""" + Return :class:`Regex` with an attached parse action to transform the parsed + result as if called using `re.sub(expr, repl, string) `_. + + Example:: + + make_html = Regex(r"(\w+):(.*?):").sub(r"<\1>\2") + print(make_html.transform_string("h1:main title:")) + # prints "

<h1>main title
" + """ + if self.asGroupList: + raise TypeError("cannot use sub() with Regex(asGroupList=True)") + + if self.asMatch and callable(repl): + raise TypeError("cannot use sub() with a callable with Regex(asMatch=True)") + + if self.asMatch: + + def pa(tokens): + return tokens[0].expand(repl) + + else: + + def pa(tokens): + return self.re.sub(repl, tokens[0]) + + return self.add_parse_action(pa) + + +class QuotedString(Token): + r""" + Token for matching strings that are delimited by quoting characters. + + Defined with the following parameters: + + - ``quote_char`` - string of one or more characters defining the + quote delimiting string + - ``esc_char`` - character to re_escape quotes, typically backslash + (default= ``None``) + - ``esc_quote`` - special quote sequence to re_escape an embedded quote + string (such as SQL's ``""`` to re_escape an embedded ``"``) + (default= ``None``) + - ``multiline`` - boolean indicating whether quotes can span + multiple lines (default= ``False``) + - ``unquote_results`` - boolean indicating whether the matched text + should be unquoted (default= ``True``) + - ``end_quote_char`` - string of one or more characters defining the + end of the quote delimited string (default= ``None`` => same as + quote_char) + - ``convert_whitespace_escapes`` - convert escaped whitespace + (``'\t'``, ``'\n'``, etc.) to actual whitespace + (default= ``True``) + + Example:: + + qs = QuotedString('"') + print(qs.search_string('lsjdf "This is the quote" sldjf')) + complex_qs = QuotedString('{{', end_quote_char='}}') + print(complex_qs.search_string('lsjdf {{This is the "quote"}} sldjf')) + sql_qs = QuotedString('"', esc_quote='""') + print(sql_qs.search_string('lsjdf "This is the quote with ""embedded"" quotes" sldjf')) + + prints:: + + [['This is the quote']] + [['This is the "quote"']] + [['This is the quote with "embedded" quotes']] + """ + ws_map = ((r"\t", "\t"), (r"\n", "\n"), (r"\f", "\f"), (r"\r", "\r")) + + def __init__( + self, + quote_char: str = "", + esc_char: typing.Optional[str] = None, + esc_quote: typing.Optional[str] = None, + multiline: bool = False, + unquote_results: bool = True, + end_quote_char: typing.Optional[str] = None, + convert_whitespace_escapes: bool = True, + *, + quoteChar: str = "", + escChar: typing.Optional[str] = None, + escQuote: typing.Optional[str] = None, + unquoteResults: bool = True, + endQuoteChar: typing.Optional[str] = None, + convertWhitespaceEscapes: bool = True, + ): + super().__init__() + escChar = escChar or esc_char + escQuote = escQuote or esc_quote + unquoteResults = unquoteResults and unquote_results + endQuoteChar = endQuoteChar or end_quote_char + convertWhitespaceEscapes = ( + convertWhitespaceEscapes and convert_whitespace_escapes + ) + quote_char = quoteChar or quote_char + + # remove white space from quote chars - wont work anyway + quote_char = quote_char.strip() + if not quote_char: + raise ValueError("quote_char cannot be the empty string") + + if endQuoteChar is None: + endQuoteChar = quote_char + else: + endQuoteChar = endQuoteChar.strip() + if not endQuoteChar: + raise ValueError("endQuoteChar cannot be the empty string") + + self.quoteChar = quote_char + self.quoteCharLen = len(quote_char) + self.firstQuoteChar = quote_char[0] + self.endQuoteChar = endQuoteChar + self.endQuoteCharLen = len(endQuoteChar) + self.escChar = escChar + self.escQuote = escQuote + self.unquoteResults = unquoteResults + self.convertWhitespaceEscapes = convertWhitespaceEscapes + + sep = "" + inner_pattern = "" + + if escQuote: + 
inner_pattern += r"{}(?:{})".format(sep, re.escape(escQuote)) + sep = "|" + + if escChar: + inner_pattern += r"{}(?:{}.)".format(sep, re.escape(escChar)) + sep = "|" + self.escCharReplacePattern = re.escape(self.escChar) + "(.)" + + if len(self.endQuoteChar) > 1: + inner_pattern += ( + "{}(?:".format(sep) + + "|".join( + "(?:{}(?!{}))".format( + re.escape(self.endQuoteChar[:i]), + re.escape(self.endQuoteChar[i:]), + ) + for i in range(len(self.endQuoteChar) - 1, 0, -1) + ) + + ")" + ) + sep = "|" + + if multiline: + self.flags = re.MULTILINE | re.DOTALL + inner_pattern += r"{}(?:[^{}{}])".format( + sep, + _escape_regex_range_chars(self.endQuoteChar[0]), + (_escape_regex_range_chars(escChar) if escChar is not None else ""), + ) + else: + self.flags = 0 + inner_pattern += r"{}(?:[^{}\n\r{}])".format( + sep, + _escape_regex_range_chars(self.endQuoteChar[0]), + (_escape_regex_range_chars(escChar) if escChar is not None else ""), + ) + + self.pattern = "".join( + [ + re.escape(self.quoteChar), + "(?:", + inner_pattern, + ")*", + re.escape(self.endQuoteChar), + ] + ) + + try: + self.re = re.compile(self.pattern, self.flags) + self.reString = self.pattern + self.re_match = self.re.match + except re.error: + raise ValueError( + "invalid pattern {!r} passed to Regex".format(self.pattern) + ) + + self.errmsg = "Expected " + self.name + self.mayIndexError = False + self.mayReturnEmpty = True + + def _generateDefaultName(self): + if self.quoteChar == self.endQuoteChar and isinstance(self.quoteChar, str_type): + return "string enclosed in {!r}".format(self.quoteChar) + + return "quoted string, starting with {} ending with {}".format( + self.quoteChar, self.endQuoteChar + ) + + def parseImpl(self, instring, loc, doActions=True): + result = ( + instring[loc] == self.firstQuoteChar + and self.re_match(instring, loc) + or None + ) + if not result: + raise ParseException(instring, loc, self.errmsg, self) + + loc = result.end() + ret = result.group() + + if self.unquoteResults: + + # strip off quotes + ret = ret[self.quoteCharLen : -self.endQuoteCharLen] + + if isinstance(ret, str_type): + # replace escaped whitespace + if "\\" in ret and self.convertWhitespaceEscapes: + for wslit, wschar in self.ws_map: + ret = ret.replace(wslit, wschar) + + # replace escaped characters + if self.escChar: + ret = re.sub(self.escCharReplacePattern, r"\g<1>", ret) + + # replace escaped quotes + if self.escQuote: + ret = ret.replace(self.escQuote, self.endQuoteChar) + + return loc, ret + + +class CharsNotIn(Token): + """Token for matching words composed of characters *not* in a given + set (will include whitespace in matched characters if not listed in + the provided exclusion set - see example). Defined with string + containing all disallowed characters, and an optional minimum, + maximum, and/or exact length. The default value for ``min`` is + 1 (a minimum value < 1 is not valid); the default values for + ``max`` and ``exact`` are 0, meaning no maximum or exact + length restriction. 
+ + Example:: + + # define a comma-separated-value as anything that is not a ',' + csv_value = CharsNotIn(',') + print(delimited_list(csv_value).parse_string("dkls,lsdkjf,s12 34,@!#,213")) + + prints:: + + ['dkls', 'lsdkjf', 's12 34', '@!#', '213'] + """ + + def __init__( + self, + not_chars: str = "", + min: int = 1, + max: int = 0, + exact: int = 0, + *, + notChars: str = "", + ): + super().__init__() + self.skipWhitespace = False + self.notChars = not_chars or notChars + self.notCharsSet = set(self.notChars) + + if min < 1: + raise ValueError( + "cannot specify a minimum length < 1; use " + "Opt(CharsNotIn()) if zero-length char group is permitted" + ) + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + self.errmsg = "Expected " + self.name + self.mayReturnEmpty = self.minLen == 0 + self.mayIndexError = False + + def _generateDefaultName(self): + not_chars_str = _collapse_string_to_ranges(self.notChars) + if len(not_chars_str) > 16: + return "!W:({}...)".format(self.notChars[: 16 - 3]) + else: + return "!W:({})".format(self.notChars) + + def parseImpl(self, instring, loc, doActions=True): + notchars = self.notCharsSet + if instring[loc] in notchars: + raise ParseException(instring, loc, self.errmsg, self) + + start = loc + loc += 1 + maxlen = min(start + self.maxLen, len(instring)) + while loc < maxlen and instring[loc] not in notchars: + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class White(Token): + """Special matching class for matching whitespace. Normally, + whitespace is ignored by pyparsing grammars. This class is included + when some whitespace structures are significant. Define with + a string containing the whitespace characters to be matched; default + is ``" \\t\\r\\n"``. Also takes optional ``min``, + ``max``, and ``exact`` arguments, as defined for the + :class:`Word` class. 
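+ 
+     Example (a minimal illustration)::
+ 
+         # match two words separated by significant spaces/tabs
+         word = Word(alphas)
+         two_words = word + White(" \t") + word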
+ """ + + whiteStrs = { + " ": "", + "\t": "", + "\n": "", + "\r": "", + "\f": "", + "\u00A0": "", + "\u1680": "", + "\u180E": "", + "\u2000": "", + "\u2001": "", + "\u2002": "", + "\u2003": "", + "\u2004": "", + "\u2005": "", + "\u2006": "", + "\u2007": "", + "\u2008": "", + "\u2009": "", + "\u200A": "", + "\u200B": "", + "\u202F": "", + "\u205F": "", + "\u3000": "", + } + + def __init__(self, ws: str = " \t\r\n", min: int = 1, max: int = 0, exact: int = 0): + super().__init__() + self.matchWhite = ws + self.set_whitespace_chars( + "".join(c for c in self.whiteStrs if c not in self.matchWhite), + copy_defaults=True, + ) + # self.leave_whitespace() + self.mayReturnEmpty = True + self.errmsg = "Expected " + self.name + + self.minLen = min + + if max > 0: + self.maxLen = max + else: + self.maxLen = _MAX_INT + + if exact > 0: + self.maxLen = exact + self.minLen = exact + + def _generateDefaultName(self): + return "".join(White.whiteStrs[c] for c in self.matchWhite) + + def parseImpl(self, instring, loc, doActions=True): + if instring[loc] not in self.matchWhite: + raise ParseException(instring, loc, self.errmsg, self) + start = loc + loc += 1 + maxloc = start + self.maxLen + maxloc = min(maxloc, len(instring)) + while loc < maxloc and instring[loc] in self.matchWhite: + loc += 1 + + if loc - start < self.minLen: + raise ParseException(instring, loc, self.errmsg, self) + + return loc, instring[start:loc] + + +class PositionToken(Token): + def __init__(self): + super().__init__() + self.mayReturnEmpty = True + self.mayIndexError = False + + +class GoToColumn(PositionToken): + """Token to advance to a specific column of input text; useful for + tabular report scraping. + """ + + def __init__(self, colno: int): + super().__init__() + self.col = colno + + def preParse(self, instring, loc): + if col(loc, instring) != self.col: + instrlen = len(instring) + if self.ignoreExprs: + loc = self._skipIgnorables(instring, loc) + while ( + loc < instrlen + and instring[loc].isspace() + and col(loc, instring) != self.col + ): + loc += 1 + return loc + + def parseImpl(self, instring, loc, doActions=True): + thiscol = col(loc, instring) + if thiscol > self.col: + raise ParseException(instring, loc, "Text not in expected column", self) + newloc = loc + self.col - thiscol + ret = instring[loc:newloc] + return newloc, ret + + +class LineStart(PositionToken): + r"""Matches if current position is at the beginning of a line within + the parse string + + Example:: + + test = '''\ + AAA this line + AAA and this line + AAA but not this one + B AAA and definitely not this one + ''' + + for t in (LineStart() + 'AAA' + restOfLine).search_string(test): + print(t) + + prints:: + + ['AAA', ' this line'] + ['AAA', ' and this line'] + + """ + + def __init__(self): + super().__init__() + self.leave_whitespace() + self.orig_whiteChars = set() | self.whiteChars + self.whiteChars.discard("\n") + self.skipper = Empty().set_whitespace_chars(self.whiteChars) + self.errmsg = "Expected start of line" + + def preParse(self, instring, loc): + if loc == 0: + return loc + else: + ret = self.skipper.preParse(instring, loc) + if "\n" in self.orig_whiteChars: + while instring[ret : ret + 1] == "\n": + ret = self.skipper.preParse(instring, ret + 1) + return ret + + def parseImpl(self, instring, loc, doActions=True): + if col(loc, instring) == 1: + return loc, [] + raise ParseException(instring, loc, self.errmsg, self) + + +class LineEnd(PositionToken): + """Matches if current position is at the end of a line within the + parse string + """ + + 
def __init__(self): + super().__init__() + self.whiteChars.discard("\n") + self.set_whitespace_chars(self.whiteChars, copy_defaults=False) + self.errmsg = "Expected end of line" + + def parseImpl(self, instring, loc, doActions=True): + if loc < len(instring): + if instring[loc] == "\n": + return loc + 1, "\n" + else: + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc + 1, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + + +class StringStart(PositionToken): + """Matches if current position is at the beginning of the parse + string + """ + + def __init__(self): + super().__init__() + self.errmsg = "Expected start of text" + + def parseImpl(self, instring, loc, doActions=True): + if loc != 0: + # see if entire string up to here is just whitespace and ignoreables + if loc != self.preParse(instring, 0): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class StringEnd(PositionToken): + """ + Matches if current position is at the end of the parse string + """ + + def __init__(self): + super().__init__() + self.errmsg = "Expected end of text" + + def parseImpl(self, instring, loc, doActions=True): + if loc < len(instring): + raise ParseException(instring, loc, self.errmsg, self) + elif loc == len(instring): + return loc + 1, [] + elif loc > len(instring): + return loc, [] + else: + raise ParseException(instring, loc, self.errmsg, self) + + +class WordStart(PositionToken): + """Matches if the current position is at the beginning of a + :class:`Word`, and is not preceded by any character in a given + set of ``word_chars`` (default= ``printables``). To emulate the + ``\b`` behavior of regular expressions, use + ``WordStart(alphanums)``. ``WordStart`` will also match at + the beginning of the string being parsed, or at the beginning of + a line. + """ + + def __init__(self, word_chars: str = printables, *, wordChars: str = printables): + wordChars = word_chars if wordChars == printables else wordChars + super().__init__() + self.wordChars = set(wordChars) + self.errmsg = "Not at the start of a word" + + def parseImpl(self, instring, loc, doActions=True): + if loc != 0: + if ( + instring[loc - 1] in self.wordChars + or instring[loc] not in self.wordChars + ): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class WordEnd(PositionToken): + """Matches if the current position is at the end of a :class:`Word`, + and is not followed by any character in a given set of ``word_chars`` + (default= ``printables``). To emulate the ``\b`` behavior of + regular expressions, use ``WordEnd(alphanums)``. ``WordEnd`` + will also match at the end of the string being parsed, or at the end + of a line. + """ + + def __init__(self, word_chars: str = printables, *, wordChars: str = printables): + wordChars = word_chars if wordChars == printables else wordChars + super().__init__() + self.wordChars = set(wordChars) + self.skipWhitespace = False + self.errmsg = "Not at the end of a word" + + def parseImpl(self, instring, loc, doActions=True): + instrlen = len(instring) + if instrlen > 0 and loc < instrlen: + if ( + instring[loc] in self.wordChars + or instring[loc - 1] not in self.wordChars + ): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + +class ParseExpression(ParserElement): + """Abstract subclass of ParserElement, for combining and + post-processing parsed tokens. 
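+     Concrete subclasses defined below include :class:`And`, :class:`Or`,
+     :class:`MatchFirst`, and :class:`Each`.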
+ """ + + def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): + super().__init__(savelist) + self.exprs: List[ParserElement] + if isinstance(exprs, _generatorType): + exprs = list(exprs) + + if isinstance(exprs, str_type): + self.exprs = [self._literalStringClass(exprs)] + elif isinstance(exprs, ParserElement): + self.exprs = [exprs] + elif isinstance(exprs, Iterable): + exprs = list(exprs) + # if sequence of strings provided, wrap with Literal + if any(isinstance(expr, str_type) for expr in exprs): + exprs = ( + self._literalStringClass(e) if isinstance(e, str_type) else e + for e in exprs + ) + self.exprs = list(exprs) + else: + try: + self.exprs = list(exprs) + except TypeError: + self.exprs = [exprs] + self.callPreparse = False + + def recurse(self) -> Sequence[ParserElement]: + return self.exprs[:] + + def append(self, other) -> ParserElement: + self.exprs.append(other) + self._defaultName = None + return self + + def leave_whitespace(self, recursive: bool = True) -> ParserElement: + """ + Extends ``leave_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on + all contained expressions. + """ + super().leave_whitespace(recursive) + + if recursive: + self.exprs = [e.copy() for e in self.exprs] + for e in self.exprs: + e.leave_whitespace(recursive) + return self + + def ignore_whitespace(self, recursive: bool = True) -> ParserElement: + """ + Extends ``ignore_whitespace`` defined in base class, and also invokes ``leave_whitespace`` on + all contained expressions. + """ + super().ignore_whitespace(recursive) + if recursive: + self.exprs = [e.copy() for e in self.exprs] + for e in self.exprs: + e.ignore_whitespace(recursive) + return self + + def ignore(self, other) -> ParserElement: + if isinstance(other, Suppress): + if other not in self.ignoreExprs: + super().ignore(other) + for e in self.exprs: + e.ignore(self.ignoreExprs[-1]) + else: + super().ignore(other) + for e in self.exprs: + e.ignore(self.ignoreExprs[-1]) + return self + + def _generateDefaultName(self): + return "{}:({})".format(self.__class__.__name__, str(self.exprs)) + + def streamline(self) -> ParserElement: + if self.streamlined: + return self + + super().streamline() + + for e in self.exprs: + e.streamline() + + # collapse nested :class:`And`'s of the form ``And(And(And(a, b), c), d)`` to ``And(a, b, c, d)`` + # but only if there are no parse actions or resultsNames on the nested And's + # (likewise for :class:`Or`'s and :class:`MatchFirst`'s) + if len(self.exprs) == 2: + other = self.exprs[0] + if ( + isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug + ): + self.exprs = other.exprs[:] + [self.exprs[1]] + self._defaultName = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + other = self.exprs[-1] + if ( + isinstance(other, self.__class__) + and not other.parseAction + and other.resultsName is None + and not other.debug + ): + self.exprs = self.exprs[:-1] + other.exprs[:] + self._defaultName = None + self.mayReturnEmpty |= other.mayReturnEmpty + self.mayIndexError |= other.mayIndexError + + self.errmsg = "Expected " + str(self) + + return self + + def validate(self, validateTrace=None) -> None: + tmp = (validateTrace if validateTrace is not None else [])[:] + [self] + for e in self.exprs: + e.validate(tmp) + self._checkRecursion([]) + + def copy(self) -> ParserElement: + ret = super().copy() + ret.exprs = [e.copy() for e in self.exprs] + return ret + + 
def _setResultsName(self, name, listAllMatches=False): + if ( + __diag__.warn_ungrouped_named_tokens_in_collection + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in self.suppress_warnings_ + ): + for e in self.exprs: + if ( + isinstance(e, ParserElement) + and e.resultsName + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in e.suppress_warnings_ + ): + warnings.warn( + "{}: setting results name {!r} on {} expression " + "collides with {!r} on contained expression".format( + "warn_ungrouped_named_tokens_in_collection", + name, + type(self).__name__, + e.resultsName, + ), + stacklevel=3, + ) + + return super()._setResultsName(name, listAllMatches) + + ignoreWhitespace = ignore_whitespace + leaveWhitespace = leave_whitespace + + +class And(ParseExpression): + """ + Requires all given :class:`ParseExpression` s to be found in the given order. + Expressions may be separated by whitespace. + May be constructed using the ``'+'`` operator. + May also be constructed using the ``'-'`` operator, which will + suppress backtracking. + + Example:: + + integer = Word(nums) + name_expr = Word(alphas)[1, ...] + + expr = And([integer("id"), name_expr("name"), integer("age")]) + # more easily written as: + expr = integer("id") + name_expr("name") + integer("age") + """ + + class _ErrorStop(Empty): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.leave_whitespace() + + def _generateDefaultName(self): + return "-" + + def __init__( + self, exprs_arg: typing.Iterable[ParserElement], savelist: bool = True + ): + exprs: List[ParserElement] = list(exprs_arg) + if exprs and Ellipsis in exprs: + tmp = [] + for i, expr in enumerate(exprs): + if expr is Ellipsis: + if i < len(exprs) - 1: + skipto_arg: ParserElement = (Empty() + exprs[i + 1]).exprs[-1] + tmp.append(SkipTo(skipto_arg)("_skipped*")) + else: + raise Exception( + "cannot construct And with sequence ending in ..." 
+ ) + else: + tmp.append(expr) + exprs[:] = tmp + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + if not isinstance(self.exprs[0], White): + self.set_whitespace_chars( + self.exprs[0].whiteChars, + copy_defaults=self.exprs[0].copyDefaultWhiteChars, + ) + self.skipWhitespace = self.exprs[0].skipWhitespace + else: + self.skipWhitespace = False + else: + self.mayReturnEmpty = True + self.callPreparse = True + + def streamline(self) -> ParserElement: + # collapse any _PendingSkip's + if self.exprs: + if any( + isinstance(e, ParseExpression) + and e.exprs + and isinstance(e.exprs[-1], _PendingSkip) + for e in self.exprs[:-1] + ): + for i, e in enumerate(self.exprs[:-1]): + if e is None: + continue + if ( + isinstance(e, ParseExpression) + and e.exprs + and isinstance(e.exprs[-1], _PendingSkip) + ): + e.exprs[-1] = e.exprs[-1] + self.exprs[i + 1] + self.exprs[i + 1] = None + self.exprs = [e for e in self.exprs if e is not None] + + super().streamline() + + # link any IndentedBlocks to the prior expression + for prev, cur in zip(self.exprs, self.exprs[1:]): + # traverse cur or any first embedded expr of cur looking for an IndentedBlock + # (but watch out for recursive grammar) + seen = set() + while cur: + if id(cur) in seen: + break + seen.add(id(cur)) + if isinstance(cur, IndentedBlock): + prev.add_parse_action( + lambda s, l, t, cur_=cur: setattr( + cur_, "parent_anchor", col(l, s) + ) + ) + break + subs = cur.recurse() + cur = next(iter(subs), None) + + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + return self + + def parseImpl(self, instring, loc, doActions=True): + # pass False as callPreParse arg to _parse for first element, since we already + # pre-parsed the string as part of our And pre-parsing + loc, resultlist = self.exprs[0]._parse( + instring, loc, doActions, callPreParse=False + ) + errorStop = False + for e in self.exprs[1:]: + # if isinstance(e, And._ErrorStop): + if type(e) is And._ErrorStop: + errorStop = True + continue + if errorStop: + try: + loc, exprtokens = e._parse(instring, loc, doActions) + except ParseSyntaxException: + raise + except ParseBaseException as pe: + pe.__traceback__ = None + raise ParseSyntaxException._from_exception(pe) + except IndexError: + raise ParseSyntaxException( + instring, len(instring), self.errmsg, self + ) + else: + loc, exprtokens = e._parse(instring, loc, doActions) + if exprtokens or exprtokens.haskeys(): + resultlist += exprtokens + return loc, resultlist + + def __iadd__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + return self.append(other) # And([self, other]) + + def _checkRecursion(self, parseElementList): + subRecCheckList = parseElementList[:] + [self] + for e in self.exprs: + e._checkRecursion(subRecCheckList) + if not e.mayReturnEmpty: + break + + def _generateDefaultName(self): + inner = " ".join(str(e) for e in self.exprs) + # strip off redundant inner {}'s + while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": + inner = inner[1:-1] + return "{" + inner + "}" + + +class Or(ParseExpression): + """Requires that at least one :class:`ParseExpression` is found. If + two expressions match, the expression that matches the longest + string will be used. May be constructed using the ``'^'`` + operator. + + Example:: + + # construct Or using '^' operator + + number = Word(nums) ^ Combine(Word(nums) + '.' 
+ Word(nums)) + print(number.search_string("123 3.1416 789")) + + prints:: + + [['123'], ['3.1416'], ['789']] + """ + + def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) + else: + self.mayReturnEmpty = True + + def streamline(self) -> ParserElement: + super().streamline() + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.saveAsList = any(e.saveAsList for e in self.exprs) + self.skipWhitespace = all( + e.skipWhitespace and not isinstance(e, White) for e in self.exprs + ) + else: + self.saveAsList = False + return self + + def parseImpl(self, instring, loc, doActions=True): + maxExcLoc = -1 + maxException = None + matches = [] + fatals = [] + if all(e.callPreparse for e in self.exprs): + loc = self.preParse(instring, loc) + for e in self.exprs: + try: + loc2 = e.try_parse(instring, loc, raise_fatal=True) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parserElement = e + fatals.append(pfe) + maxException = None + maxExcLoc = -1 + except ParseException as err: + if not fatals: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException( + instring, len(instring), e.errmsg, self + ) + maxExcLoc = len(instring) + else: + # save match among all matches, to retry longest to shortest + matches.append((loc2, e)) + + if matches: + # re-evaluate all matches in descending order of length of match, in case attached actions + # might change whether or how much they match of the input. 
+ matches.sort(key=itemgetter(0), reverse=True) + + if not doActions: + # no further conditions or parse actions to change the selection of + # alternative, so the first match will be the best match + best_expr = matches[0][1] + return best_expr._parse(instring, loc, doActions) + + longest = -1, None + for loc1, expr1 in matches: + if loc1 <= longest[0]: + # already have a longer match than this one will deliver, we are done + return longest + + try: + loc2, toks = expr1._parse(instring, loc, doActions) + except ParseException as err: + err.__traceback__ = None + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + else: + if loc2 >= loc1: + return loc2, toks + # didn't match as much as before + elif loc2 > longest[0]: + longest = loc2, toks + + if longest != (-1, None): + return longest + + if fatals: + if len(fatals) > 1: + fatals.sort(key=lambda e: -e.loc) + if fatals[0].loc == fatals[1].loc: + fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement)))) + max_fatal = fatals[0] + raise max_fatal + + if maxException is not None: + maxException.msg = self.errmsg + raise maxException + else: + raise ParseException( + instring, loc, "no defined alternatives to match", self + ) + + def __ixor__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + return self.append(other) # Or([self, other]) + + def _generateDefaultName(self): + return "{" + " ^ ".join(str(e) for e in self.exprs) + "}" + + def _setResultsName(self, name, listAllMatches=False): + if ( + __diag__.warn_multiple_tokens_in_named_alternation + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in self.suppress_warnings_ + ): + if any( + isinstance(e, And) + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in e.suppress_warnings_ + for e in self.exprs + ): + warnings.warn( + "{}: setting results name {!r} on {} expression " + "will return a list of all parsed tokens in an And alternative, " + "in prior versions only the first token was returned; enclose " + "contained argument in Group".format( + "warn_multiple_tokens_in_named_alternation", + name, + type(self).__name__, + ), + stacklevel=3, + ) + + return super()._setResultsName(name, listAllMatches) + + +class MatchFirst(ParseExpression): + """Requires that at least one :class:`ParseExpression` is found. If + more than one expression matches, the first one listed is the one that will + match. May be constructed using the ``'|'`` operator. + + Example:: + + # construct MatchFirst using '|' operator + + # watch the order of expressions to match + number = Word(nums) | Combine(Word(nums) + '.' + Word(nums)) + print(number.search_string("123 3.1416 789")) # Fail! -> [['123'], ['3'], ['1416'], ['789']] + + # put more selective expression first + number = Combine(Word(nums) + '.' 
+ Word(nums)) | Word(nums) + print(number.search_string("123 3.1416 789")) # Better -> [['123'], ['3.1416'], ['789']] + """ + + def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = False): + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all(e.skipWhitespace for e in self.exprs) + else: + self.mayReturnEmpty = True + + def streamline(self) -> ParserElement: + if self.streamlined: + return self + + super().streamline() + if self.exprs: + self.saveAsList = any(e.saveAsList for e in self.exprs) + self.mayReturnEmpty = any(e.mayReturnEmpty for e in self.exprs) + self.skipWhitespace = all( + e.skipWhitespace and not isinstance(e, White) for e in self.exprs + ) + else: + self.saveAsList = False + self.mayReturnEmpty = True + return self + + def parseImpl(self, instring, loc, doActions=True): + maxExcLoc = -1 + maxException = None + + for e in self.exprs: + try: + return e._parse( + instring, + loc, + doActions, + ) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parserElement = e + raise + except ParseException as err: + if err.loc > maxExcLoc: + maxException = err + maxExcLoc = err.loc + except IndexError: + if len(instring) > maxExcLoc: + maxException = ParseException( + instring, len(instring), e.errmsg, self + ) + maxExcLoc = len(instring) + + if maxException is not None: + maxException.msg = self.errmsg + raise maxException + else: + raise ParseException( + instring, loc, "no defined alternatives to match", self + ) + + def __ior__(self, other): + if isinstance(other, str_type): + other = self._literalStringClass(other) + return self.append(other) # MatchFirst([self, other]) + + def _generateDefaultName(self): + return "{" + " | ".join(str(e) for e in self.exprs) + "}" + + def _setResultsName(self, name, listAllMatches=False): + if ( + __diag__.warn_multiple_tokens_in_named_alternation + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in self.suppress_warnings_ + ): + if any( + isinstance(e, And) + and Diagnostics.warn_multiple_tokens_in_named_alternation + not in e.suppress_warnings_ + for e in self.exprs + ): + warnings.warn( + "{}: setting results name {!r} on {} expression " + "will return a list of all parsed tokens in an And alternative, " + "in prior versions only the first token was returned; enclose " + "contained argument in Group".format( + "warn_multiple_tokens_in_named_alternation", + name, + type(self).__name__, + ), + stacklevel=3, + ) + + return super()._setResultsName(name, listAllMatches) + + +class Each(ParseExpression): + """Requires all given :class:`ParseExpression` s to be found, but in + any order. Expressions may be separated by whitespace. + + May be constructed using the ``'&'`` operator. 
+ + Example:: + + color = one_of("RED ORANGE YELLOW GREEN BLUE PURPLE BLACK WHITE BROWN") + shape_type = one_of("SQUARE CIRCLE TRIANGLE STAR HEXAGON OCTAGON") + integer = Word(nums) + shape_attr = "shape:" + shape_type("shape") + posn_attr = "posn:" + Group(integer("x") + ',' + integer("y"))("posn") + color_attr = "color:" + color("color") + size_attr = "size:" + integer("size") + + # use Each (using operator '&') to accept attributes in any order + # (shape and posn are required, color and size are optional) + shape_spec = shape_attr & posn_attr & Opt(color_attr) & Opt(size_attr) + + shape_spec.run_tests(''' + shape: SQUARE color: BLACK posn: 100, 120 + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + color:GREEN size:20 shape:TRIANGLE posn:20,40 + ''' + ) + + prints:: + + shape: SQUARE color: BLACK posn: 100, 120 + ['shape:', 'SQUARE', 'color:', 'BLACK', 'posn:', ['100', ',', '120']] + - color: BLACK + - posn: ['100', ',', '120'] + - x: 100 + - y: 120 + - shape: SQUARE + + + shape: CIRCLE size: 50 color: BLUE posn: 50,80 + ['shape:', 'CIRCLE', 'size:', '50', 'color:', 'BLUE', 'posn:', ['50', ',', '80']] + - color: BLUE + - posn: ['50', ',', '80'] + - x: 50 + - y: 80 + - shape: CIRCLE + - size: 50 + + + color: GREEN size: 20 shape: TRIANGLE posn: 20,40 + ['color:', 'GREEN', 'size:', '20', 'shape:', 'TRIANGLE', 'posn:', ['20', ',', '40']] + - color: GREEN + - posn: ['20', ',', '40'] + - x: 20 + - y: 40 + - shape: TRIANGLE + - size: 20 + """ + + def __init__(self, exprs: typing.Iterable[ParserElement], savelist: bool = True): + super().__init__(exprs, savelist) + if self.exprs: + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + self.skipWhitespace = True + self.initExprGroups = True + self.saveAsList = True + + def streamline(self) -> ParserElement: + super().streamline() + if self.exprs: + self.mayReturnEmpty = all(e.mayReturnEmpty for e in self.exprs) + else: + self.mayReturnEmpty = True + return self + + def parseImpl(self, instring, loc, doActions=True): + if self.initExprGroups: + self.opt1map = dict( + (id(e.expr), e) for e in self.exprs if isinstance(e, Opt) + ) + opt1 = [e.expr for e in self.exprs if isinstance(e, Opt)] + opt2 = [ + e + for e in self.exprs + if e.mayReturnEmpty and not isinstance(e, (Opt, Regex, ZeroOrMore)) + ] + self.optionals = opt1 + opt2 + self.multioptionals = [ + e.expr.set_results_name(e.resultsName, list_all_matches=True) + for e in self.exprs + if isinstance(e, _MultipleMatch) + ] + self.multirequired = [ + e.expr.set_results_name(e.resultsName, list_all_matches=True) + for e in self.exprs + if isinstance(e, OneOrMore) + ] + self.required = [ + e for e in self.exprs if not isinstance(e, (Opt, ZeroOrMore, OneOrMore)) + ] + self.required += self.multirequired + self.initExprGroups = False + + tmpLoc = loc + tmpReqd = self.required[:] + tmpOpt = self.optionals[:] + multis = self.multioptionals[:] + matchOrder = [] + + keepMatching = True + failed = [] + fatals = [] + while keepMatching: + tmpExprs = tmpReqd + tmpOpt + multis + failed.clear() + fatals.clear() + for e in tmpExprs: + try: + tmpLoc = e.try_parse(instring, tmpLoc, raise_fatal=True) + except ParseFatalException as pfe: + pfe.__traceback__ = None + pfe.parserElement = e + fatals.append(pfe) + failed.append(e) + except ParseException: + failed.append(e) + else: + matchOrder.append(self.opt1map.get(id(e), e)) + if e in tmpReqd: + tmpReqd.remove(e) + elif e in tmpOpt: + tmpOpt.remove(e) + if len(failed) == len(tmpExprs): + keepMatching = False + + # 
look for any ParseFatalExceptions + if fatals: + if len(fatals) > 1: + fatals.sort(key=lambda e: -e.loc) + if fatals[0].loc == fatals[1].loc: + fatals.sort(key=lambda e: (-e.loc, -len(str(e.parserElement)))) + max_fatal = fatals[0] + raise max_fatal + + if tmpReqd: + missing = ", ".join([str(e) for e in tmpReqd]) + raise ParseException( + instring, + loc, + "Missing one or more required elements ({})".format(missing), + ) + + # add any unmatched Opts, in case they have default values defined + matchOrder += [e for e in self.exprs if isinstance(e, Opt) and e.expr in tmpOpt] + + total_results = ParseResults([]) + for e in matchOrder: + loc, results = e._parse(instring, loc, doActions) + total_results += results + + return loc, total_results + + def _generateDefaultName(self): + return "{" + " & ".join(str(e) for e in self.exprs) + "}" + + +class ParseElementEnhance(ParserElement): + """Abstract subclass of :class:`ParserElement`, for combining and + post-processing parsed tokens. + """ + + def __init__(self, expr: Union[ParserElement, str], savelist: bool = False): + super().__init__(savelist) + if isinstance(expr, str_type): + if issubclass(self._literalStringClass, Token): + expr = self._literalStringClass(expr) + elif issubclass(type(self), self._literalStringClass): + expr = Literal(expr) + else: + expr = self._literalStringClass(Literal(expr)) + self.expr = expr + if expr is not None: + self.mayIndexError = expr.mayIndexError + self.mayReturnEmpty = expr.mayReturnEmpty + self.set_whitespace_chars( + expr.whiteChars, copy_defaults=expr.copyDefaultWhiteChars + ) + self.skipWhitespace = expr.skipWhitespace + self.saveAsList = expr.saveAsList + self.callPreparse = expr.callPreparse + self.ignoreExprs.extend(expr.ignoreExprs) + + def recurse(self) -> Sequence[ParserElement]: + return [self.expr] if self.expr is not None else [] + + def parseImpl(self, instring, loc, doActions=True): + if self.expr is not None: + return self.expr._parse(instring, loc, doActions, callPreParse=False) + else: + raise ParseException(instring, loc, "No expression defined", self) + + def leave_whitespace(self, recursive: bool = True) -> ParserElement: + super().leave_whitespace(recursive) + + if recursive: + self.expr = self.expr.copy() + if self.expr is not None: + self.expr.leave_whitespace(recursive) + return self + + def ignore_whitespace(self, recursive: bool = True) -> ParserElement: + super().ignore_whitespace(recursive) + + if recursive: + self.expr = self.expr.copy() + if self.expr is not None: + self.expr.ignore_whitespace(recursive) + return self + + def ignore(self, other) -> ParserElement: + if isinstance(other, Suppress): + if other not in self.ignoreExprs: + super().ignore(other) + if self.expr is not None: + self.expr.ignore(self.ignoreExprs[-1]) + else: + super().ignore(other) + if self.expr is not None: + self.expr.ignore(self.ignoreExprs[-1]) + return self + + def streamline(self) -> ParserElement: + super().streamline() + if self.expr is not None: + self.expr.streamline() + return self + + def _checkRecursion(self, parseElementList): + if self in parseElementList: + raise RecursiveGrammarException(parseElementList + [self]) + subRecCheckList = parseElementList[:] + [self] + if self.expr is not None: + self.expr._checkRecursion(subRecCheckList) + + def validate(self, validateTrace=None) -> None: + if validateTrace is None: + validateTrace = [] + tmp = validateTrace[:] + [self] + if self.expr is not None: + self.expr.validate(tmp) + self._checkRecursion([]) + + def _generateDefaultName(self): + 
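+        # default name shows the subclass name with its contained expression, e.g. "FollowedBy:(expr)"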
return "{}:({})".format(self.__class__.__name__, str(self.expr)) + + ignoreWhitespace = ignore_whitespace + leaveWhitespace = leave_whitespace + + +class IndentedBlock(ParseElementEnhance): + """ + Expression to match one or more expressions at a given indentation level. + Useful for parsing text where structure is implied by indentation (like Python source code). + """ + + class _Indent(Empty): + def __init__(self, ref_col: int): + super().__init__() + self.errmsg = "expected indent at column {}".format(ref_col) + self.add_condition(lambda s, l, t: col(l, s) == ref_col) + + class _IndentGreater(Empty): + def __init__(self, ref_col: int): + super().__init__() + self.errmsg = "expected indent at column greater than {}".format(ref_col) + self.add_condition(lambda s, l, t: col(l, s) > ref_col) + + def __init__( + self, expr: ParserElement, *, recursive: bool = False, grouped: bool = True + ): + super().__init__(expr, savelist=True) + # if recursive: + # raise NotImplementedError("IndentedBlock with recursive is not implemented") + self._recursive = recursive + self._grouped = grouped + self.parent_anchor = 1 + + def parseImpl(self, instring, loc, doActions=True): + # advance parse position to non-whitespace by using an Empty() + # this should be the column to be used for all subsequent indented lines + anchor_loc = Empty().preParse(instring, loc) + + # see if self.expr matches at the current location - if not it will raise an exception + # and no further work is necessary + self.expr.try_parse(instring, anchor_loc, doActions) + + indent_col = col(anchor_loc, instring) + peer_detect_expr = self._Indent(indent_col) + + inner_expr = Empty() + peer_detect_expr + self.expr + if self._recursive: + sub_indent = self._IndentGreater(indent_col) + nested_block = IndentedBlock( + self.expr, recursive=self._recursive, grouped=self._grouped + ) + nested_block.set_debug(self.debug) + nested_block.parent_anchor = indent_col + inner_expr += Opt(sub_indent + nested_block) + + inner_expr.set_name(f"inner {hex(id(inner_expr))[-4:].upper()}@{indent_col}") + block = OneOrMore(inner_expr) + + trailing_undent = self._Indent(self.parent_anchor) | StringEnd() + + if self._grouped: + wrapper = Group + else: + wrapper = lambda expr: expr + return (wrapper(block) + Optional(trailing_undent)).parseImpl( + instring, anchor_loc, doActions + ) + + +class AtStringStart(ParseElementEnhance): + """Matches if expression matches at the beginning of the parse + string:: + + AtStringStart(Word(nums)).parse_string("123") + # prints ["123"] + + AtStringStart(Word(nums)).parse_string(" 123") + # raises ParseException + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + self.callPreparse = False + + def parseImpl(self, instring, loc, doActions=True): + if loc != 0: + raise ParseException(instring, loc, "not found at string start") + return super().parseImpl(instring, loc, doActions) + + +class AtLineStart(ParseElementEnhance): + r"""Matches if an expression matches at the beginning of a line within + the parse string + + Example:: + + test = '''\ + AAA this line + AAA and this line + AAA but not this one + B AAA and definitely not this one + ''' + + for t in (AtLineStart('AAA') + restOfLine).search_string(test): + print(t) + + prints:: + + ['AAA', ' this line'] + ['AAA', ' and this line'] + + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + self.callPreparse = False + + def parseImpl(self, instring, loc, doActions=True): + if col(loc, instring) != 1: + raise 
ParseException(instring, loc, "not found at line start") + return super().parseImpl(instring, loc, doActions) + + +class FollowedBy(ParseElementEnhance): + """Lookahead matching of the given parse expression. + ``FollowedBy`` does *not* advance the parsing position within + the input string, it only verifies that the specified parse + expression matches at the current position. ``FollowedBy`` + always returns a null token list. If any results names are defined + in the lookahead expression, those *will* be returned for access by + name. + + Example:: + + # use FollowedBy to match a label only if it is followed by a ':' + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + + attr_expr[1, ...].parse_string("shape: SQUARE color: BLACK posn: upper left").pprint() + + prints:: + + [['shape', 'SQUARE'], ['color', 'BLACK'], ['posn', 'upper left']] + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, doActions=True): + # by using self._expr.parse and deleting the contents of the returned ParseResults list + # we keep any named results that were defined in the FollowedBy expression + _, ret = self.expr._parse(instring, loc, doActions=doActions) + del ret[:] + + return loc, ret + + +class PrecededBy(ParseElementEnhance): + """Lookbehind matching of the given parse expression. + ``PrecededBy`` does not advance the parsing position within the + input string, it only verifies that the specified parse expression + matches prior to the current position. ``PrecededBy`` always + returns a null token list, but if a results name is defined on the + given expression, it is returned. + + Parameters: + + - expr - expression that must match prior to the current parse + location + - retreat - (default= ``None``) - (int) maximum number of characters + to lookbehind prior to the current parse location + + If the lookbehind expression is a string, :class:`Literal`, + :class:`Keyword`, or a :class:`Word` or :class:`CharsNotIn` + with a specified exact or maximum length, then the retreat + parameter is not required. Otherwise, retreat must be specified to + give a maximum number of characters to look back from + the current parse position for a lookbehind match. 
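+
+    For a variable-length lookbehind expression, ``retreat`` gives the maximum
+    window to scan back over (a minimal sketch; the window size is illustrative)::
+
+        # look back up to 10 characters for one or more digits
+        num_prefixed = PrecededBy(Word(nums), retreat=10) + Word(alphas)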
+ + Example:: + + # VB-style variable names with type prefixes + int_var = PrecededBy("#") + pyparsing_common.identifier + str_var = PrecededBy("$") + pyparsing_common.identifier + + """ + + def __init__( + self, expr: Union[ParserElement, str], retreat: typing.Optional[int] = None + ): + super().__init__(expr) + self.expr = self.expr().leave_whitespace() + self.mayReturnEmpty = True + self.mayIndexError = False + self.exact = False + if isinstance(expr, str_type): + retreat = len(expr) + self.exact = True + elif isinstance(expr, (Literal, Keyword)): + retreat = expr.matchLen + self.exact = True + elif isinstance(expr, (Word, CharsNotIn)) and expr.maxLen != _MAX_INT: + retreat = expr.maxLen + self.exact = True + elif isinstance(expr, PositionToken): + retreat = 0 + self.exact = True + self.retreat = retreat + self.errmsg = "not preceded by " + str(expr) + self.skipWhitespace = False + self.parseAction.append(lambda s, l, t: t.__delitem__(slice(None, None))) + + def parseImpl(self, instring, loc=0, doActions=True): + if self.exact: + if loc < self.retreat: + raise ParseException(instring, loc, self.errmsg) + start = loc - self.retreat + _, ret = self.expr._parse(instring, start) + else: + # retreat specified a maximum lookbehind window, iterate + test_expr = self.expr + StringEnd() + instring_slice = instring[max(0, loc - self.retreat) : loc] + last_expr = ParseException(instring, loc, self.errmsg) + for offset in range(1, min(loc, self.retreat + 1) + 1): + try: + # print('trying', offset, instring_slice, repr(instring_slice[loc - offset:])) + _, ret = test_expr._parse( + instring_slice, len(instring_slice) - offset + ) + except ParseBaseException as pbe: + last_expr = pbe + else: + break + else: + raise last_expr + return loc, ret + + +class Located(ParseElementEnhance): + """ + Decorates a returned token with its starting and ending + locations in the input string. + + This helper adds the following results names: + + - ``locn_start`` - location where matched expression begins + - ``locn_end`` - location where matched expression ends + - ``value`` - the actual parsed results + + Be careful if the input text contains ```` characters, you + may want to call :class:`ParserElement.parse_with_tabs` + + Example:: + + wd = Word(alphas) + for match in Located(wd).search_string("ljsdf123lksdjjf123lkkjj1222"): + print(match) + + prints:: + + [0, ['ljsdf'], 5] + [8, ['lksdjjf'], 15] + [18, ['lkkjj'], 23] + + """ + + def parseImpl(self, instring, loc, doActions=True): + start = loc + loc, tokens = self.expr._parse(instring, start, doActions, callPreParse=False) + ret_tokens = ParseResults([start, tokens, loc]) + ret_tokens["locn_start"] = start + ret_tokens["value"] = tokens + ret_tokens["locn_end"] = loc + if self.resultsName: + # must return as a list, so that the name will be attached to the complete group + return loc, [ret_tokens] + else: + return loc, ret_tokens + + +class NotAny(ParseElementEnhance): + """ + Lookahead to disallow matching with the given parse expression. + ``NotAny`` does *not* advance the parsing position within the + input string, it only verifies that the specified parse expression + does *not* match at the current position. Also, ``NotAny`` does + *not* skip over leading whitespace. ``NotAny`` always returns + a null token list. May be constructed using the ``'~'`` operator. 
+ + Example:: + + AND, OR, NOT = map(CaselessKeyword, "AND OR NOT".split()) + + # take care not to mistake keywords for identifiers + ident = ~(AND | OR | NOT) + Word(alphas) + boolean_term = Opt(NOT) + ident + + # very crude boolean expression - to support parenthesis groups and + # operation hierarchy, use infix_notation + boolean_expr = boolean_term + ((AND | OR) + boolean_term)[...] + + # integers that are followed by "." are actually floats + integer = Word(nums) + ~Char(".") + """ + + def __init__(self, expr: Union[ParserElement, str]): + super().__init__(expr) + # do NOT use self.leave_whitespace(), don't want to propagate to exprs + # self.leave_whitespace() + self.skipWhitespace = False + + self.mayReturnEmpty = True + self.errmsg = "Found unwanted token, " + str(self.expr) + + def parseImpl(self, instring, loc, doActions=True): + if self.expr.can_parse_next(instring, loc): + raise ParseException(instring, loc, self.errmsg, self) + return loc, [] + + def _generateDefaultName(self): + return "~{" + str(self.expr) + "}" + + +class _MultipleMatch(ParseElementEnhance): + def __init__( + self, + expr: ParserElement, + stop_on: typing.Optional[Union[ParserElement, str]] = None, + *, + stopOn: typing.Optional[Union[ParserElement, str]] = None, + ): + super().__init__(expr) + stopOn = stopOn or stop_on + self.saveAsList = True + ender = stopOn + if isinstance(ender, str_type): + ender = self._literalStringClass(ender) + self.stopOn(ender) + + def stopOn(self, ender) -> ParserElement: + if isinstance(ender, str_type): + ender = self._literalStringClass(ender) + self.not_ender = ~ender if ender is not None else None + return self + + def parseImpl(self, instring, loc, doActions=True): + self_expr_parse = self.expr._parse + self_skip_ignorables = self._skipIgnorables + check_ender = self.not_ender is not None + if check_ender: + try_not_ender = self.not_ender.tryParse + + # must be at least one (but first see if we are the stopOn sentinel; + # if so, fail) + if check_ender: + try_not_ender(instring, loc) + loc, tokens = self_expr_parse(instring, loc, doActions) + try: + hasIgnoreExprs = not not self.ignoreExprs + while 1: + if check_ender: + try_not_ender(instring, loc) + if hasIgnoreExprs: + preloc = self_skip_ignorables(instring, loc) + else: + preloc = loc + loc, tmptokens = self_expr_parse(instring, preloc, doActions) + if tmptokens or tmptokens.haskeys(): + tokens += tmptokens + except (ParseException, IndexError): + pass + + return loc, tokens + + def _setResultsName(self, name, listAllMatches=False): + if ( + __diag__.warn_ungrouped_named_tokens_in_collection + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in self.suppress_warnings_ + ): + for e in [self.expr] + self.expr.recurse(): + if ( + isinstance(e, ParserElement) + and e.resultsName + and Diagnostics.warn_ungrouped_named_tokens_in_collection + not in e.suppress_warnings_ + ): + warnings.warn( + "{}: setting results name {!r} on {} expression " + "collides with {!r} on contained expression".format( + "warn_ungrouped_named_tokens_in_collection", + name, + type(self).__name__, + e.resultsName, + ), + stacklevel=3, + ) + + return super()._setResultsName(name, listAllMatches) + + +class OneOrMore(_MultipleMatch): + """ + Repetition of one or more of the given expression. 
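+
+    ``expr[1, ...]`` is equivalent shorthand for ``OneOrMore(expr)``; a
+    minimal sketch::
+
+        Word(nums)[1, ...].parse_string("1 2 3")   # -> ['1', '2', '3']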
+ + Parameters: + - expr - expression that must match one or more times + - stop_on - (default= ``None``) - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) + + Example:: + + data_word = Word(alphas) + label = data_word + FollowedBy(':') + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word).set_parse_action(' '.join)) + + text = "shape: SQUARE posn: upper left color: BLACK" + attr_expr[1, ...].parse_string(text).pprint() # Fail! read 'color' as data instead of next label -> [['shape', 'SQUARE color']] + + # use stop_on attribute for OneOrMore to avoid reading label string as part of the data + attr_expr = Group(label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + OneOrMore(attr_expr).parse_string(text).pprint() # Better -> [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'BLACK']] + + # could also be written as + (attr_expr * (1,)).parse_string(text).pprint() + """ + + def _generateDefaultName(self): + return "{" + str(self.expr) + "}..." + + +class ZeroOrMore(_MultipleMatch): + """ + Optional repetition of zero or more of the given expression. + + Parameters: + - ``expr`` - expression that must match zero or more times + - ``stop_on`` - expression for a terminating sentinel + (only required if the sentinel would ordinarily match the repetition + expression) - (default= ``None``) + + Example: similar to :class:`OneOrMore` + """ + + def __init__( + self, + expr: ParserElement, + stop_on: typing.Optional[Union[ParserElement, str]] = None, + *, + stopOn: typing.Optional[Union[ParserElement, str]] = None, + ): + super().__init__(expr, stopOn=stopOn or stop_on) + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, doActions=True): + try: + return super().parseImpl(instring, loc, doActions) + except (ParseException, IndexError): + return loc, ParseResults([], name=self.resultsName) + + def _generateDefaultName(self): + return "[" + str(self.expr) + "]..." + + +class _NullToken: + def __bool__(self): + return False + + def __str__(self): + return "" + + +class Opt(ParseElementEnhance): + """ + Optional matching of the given expression. + + Parameters: + - ``expr`` - expression that must match zero or more times + - ``default`` (optional) - value to be returned if the optional expression is not found. 
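+
+    A minimal sketch of the ``default`` behavior (values are illustrative)::
+
+        Opt(Word(nums), default="0").parse_string("abc")   # -> ['0']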
+ + Example:: + + # US postal code can be a 5-digit zip, plus optional 4-digit qualifier + zip = Combine(Word(nums, exact=5) + Opt('-' + Word(nums, exact=4))) + zip.run_tests(''' + # traditional ZIP code + 12345 + + # ZIP+4 form + 12101-0001 + + # invalid ZIP + 98765- + ''') + + prints:: + + # traditional ZIP code + 12345 + ['12345'] + + # ZIP+4 form + 12101-0001 + ['12101-0001'] + + # invalid ZIP + 98765- + ^ + FAIL: Expected end of text (at char 5), (line:1, col:6) + """ + + __optionalNotMatched = _NullToken() + + def __init__( + self, expr: Union[ParserElement, str], default: Any = __optionalNotMatched + ): + super().__init__(expr, savelist=False) + self.saveAsList = self.expr.saveAsList + self.defaultValue = default + self.mayReturnEmpty = True + + def parseImpl(self, instring, loc, doActions=True): + self_expr = self.expr + try: + loc, tokens = self_expr._parse(instring, loc, doActions, callPreParse=False) + except (ParseException, IndexError): + default_value = self.defaultValue + if default_value is not self.__optionalNotMatched: + if self_expr.resultsName: + tokens = ParseResults([default_value]) + tokens[self_expr.resultsName] = default_value + else: + tokens = [default_value] + else: + tokens = [] + return loc, tokens + + def _generateDefaultName(self): + inner = str(self.expr) + # strip off redundant inner {}'s + while len(inner) > 1 and inner[0 :: len(inner) - 1] == "{}": + inner = inner[1:-1] + return "[" + inner + "]" + + +Optional = Opt + + +class SkipTo(ParseElementEnhance): + """ + Token for skipping over all undefined text until the matched + expression is found. + + Parameters: + - ``expr`` - target expression marking the end of the data to be skipped + - ``include`` - if ``True``, the target expression is also parsed + (the skipped text and target expression are returned as a 2-element + list) (default= ``False``). 
+ - ``ignore`` - (default= ``None``) used to define grammars (typically quoted strings and + comments) that might contain false matches to the target expression + - ``fail_on`` - (default= ``None``) define expressions that are not allowed to be + included in the skipped test; if found before the target expression is found, + the :class:`SkipTo` is not a match + + Example:: + + report = ''' + Outstanding Issues Report - 1 Jan 2000 + + # | Severity | Description | Days Open + -----+----------+-------------------------------------------+----------- + 101 | Critical | Intermittent system crash | 6 + 94 | Cosmetic | Spelling error on Login ('log|n') | 14 + 79 | Minor | System slow when running too many reports | 47 + ''' + integer = Word(nums) + SEP = Suppress('|') + # use SkipTo to simply match everything up until the next SEP + # - ignore quoted strings, so that a '|' character inside a quoted string does not match + # - parse action will call token.strip() for each matched token, i.e., the description body + string_data = SkipTo(SEP, ignore=quoted_string) + string_data.set_parse_action(token_map(str.strip)) + ticket_expr = (integer("issue_num") + SEP + + string_data("sev") + SEP + + string_data("desc") + SEP + + integer("days_open")) + + for tkt in ticket_expr.search_string(report): + print tkt.dump() + + prints:: + + ['101', 'Critical', 'Intermittent system crash', '6'] + - days_open: '6' + - desc: 'Intermittent system crash' + - issue_num: '101' + - sev: 'Critical' + ['94', 'Cosmetic', "Spelling error on Login ('log|n')", '14'] + - days_open: '14' + - desc: "Spelling error on Login ('log|n')" + - issue_num: '94' + - sev: 'Cosmetic' + ['79', 'Minor', 'System slow when running too many reports', '47'] + - days_open: '47' + - desc: 'System slow when running too many reports' + - issue_num: '79' + - sev: 'Minor' + """ + + def __init__( + self, + other: Union[ParserElement, str], + include: bool = False, + ignore: bool = None, + fail_on: typing.Optional[Union[ParserElement, str]] = None, + *, + failOn: Union[ParserElement, str] = None, + ): + super().__init__(other) + failOn = failOn or fail_on + self.ignoreExpr = ignore + self.mayReturnEmpty = True + self.mayIndexError = False + self.includeMatch = include + self.saveAsList = False + if isinstance(failOn, str_type): + self.failOn = self._literalStringClass(failOn) + else: + self.failOn = failOn + self.errmsg = "No match found for " + str(self.expr) + + def parseImpl(self, instring, loc, doActions=True): + startloc = loc + instrlen = len(instring) + self_expr_parse = self.expr._parse + self_failOn_canParseNext = ( + self.failOn.canParseNext if self.failOn is not None else None + ) + self_ignoreExpr_tryParse = ( + self.ignoreExpr.tryParse if self.ignoreExpr is not None else None + ) + + tmploc = loc + while tmploc <= instrlen: + if self_failOn_canParseNext is not None: + # break if failOn expression matches + if self_failOn_canParseNext(instring, tmploc): + break + + if self_ignoreExpr_tryParse is not None: + # advance past ignore expressions + while 1: + try: + tmploc = self_ignoreExpr_tryParse(instring, tmploc) + except ParseBaseException: + break + + try: + self_expr_parse(instring, tmploc, doActions=False, callPreParse=False) + except (ParseException, IndexError): + # no match, advance loc in string + tmploc += 1 + else: + # matched skipto expr, done + break + + else: + # ran off the end of the input string without matching skipto expr, fail + raise ParseException(instring, loc, self.errmsg, self) + + # build up return values + loc = tmploc 
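+        # the text between the starting position and the matched location is the skipped text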
+ skiptext = instring[startloc:loc] + skipresult = ParseResults(skiptext) + + if self.includeMatch: + loc, mat = self_expr_parse(instring, loc, doActions, callPreParse=False) + skipresult += mat + + return loc, skipresult + + +class Forward(ParseElementEnhance): + """ + Forward declaration of an expression to be defined later - + used for recursive grammars, such as algebraic infix notation. + When the expression is known, it is assigned to the ``Forward`` + variable using the ``'<<'`` operator. + + Note: take care when assigning to ``Forward`` not to overlook + precedence of operators. + + Specifically, ``'|'`` has a lower precedence than ``'<<'``, so that:: + + fwd_expr << a | b | c + + will actually be evaluated as:: + + (fwd_expr << a) | b | c + + thereby leaving b and c out as parseable alternatives. It is recommended that you + explicitly group the values inserted into the ``Forward``:: + + fwd_expr << (a | b | c) + + Converting to use the ``'<<='`` operator instead will avoid this problem. + + See :class:`ParseResults.pprint` for an example of a recursive + parser created using ``Forward``. + """ + + def __init__(self, other: typing.Optional[Union[ParserElement, str]] = None): + self.caller_frame = traceback.extract_stack(limit=2)[0] + super().__init__(other, savelist=False) + self.lshift_line = None + + def __lshift__(self, other): + if hasattr(self, "caller_frame"): + del self.caller_frame + if isinstance(other, str_type): + other = self._literalStringClass(other) + self.expr = other + self.mayIndexError = self.expr.mayIndexError + self.mayReturnEmpty = self.expr.mayReturnEmpty + self.set_whitespace_chars( + self.expr.whiteChars, copy_defaults=self.expr.copyDefaultWhiteChars + ) + self.skipWhitespace = self.expr.skipWhitespace + self.saveAsList = self.expr.saveAsList + self.ignoreExprs.extend(self.expr.ignoreExprs) + self.lshift_line = traceback.extract_stack(limit=2)[-2] + return self + + def __ilshift__(self, other): + return self << other + + def __or__(self, other): + caller_line = traceback.extract_stack(limit=2)[-2] + if ( + __diag__.warn_on_match_first_with_lshift_operator + and caller_line == self.lshift_line + and Diagnostics.warn_on_match_first_with_lshift_operator + not in self.suppress_warnings_ + ): + warnings.warn( + "using '<<' operator with '|' is probably an error, use '<<='", + stacklevel=2, + ) + ret = super().__or__(other) + return ret + + def __del__(self): + # see if we are getting dropped because of '=' reassignment of var instead of '<<=' or '<<' + if ( + self.expr is None + and __diag__.warn_on_assignment_to_Forward + and Diagnostics.warn_on_assignment_to_Forward not in self.suppress_warnings_ + ): + warnings.warn_explicit( + "Forward defined here but no expression attached later using '<<=' or '<<'", + UserWarning, + filename=self.caller_frame.filename, + lineno=self.caller_frame.lineno, + ) + + def parseImpl(self, instring, loc, doActions=True): + if ( + self.expr is None + and __diag__.warn_on_parse_using_empty_Forward + and Diagnostics.warn_on_parse_using_empty_Forward + not in self.suppress_warnings_ + ): + # walk stack until parse_string, scan_string, search_string, or transform_string is found + parse_fns = [ + "parse_string", + "scan_string", + "search_string", + "transform_string", + ] + tb = traceback.extract_stack(limit=200) + for i, frm in enumerate(reversed(tb), start=1): + if frm.name in parse_fns: + stacklevel = i + 1 + break + else: + stacklevel = 2 + warnings.warn( + "Forward expression was never assigned a value, will not parse any 
input", + stacklevel=stacklevel, + ) + if not ParserElement._left_recursion_enabled: + return super().parseImpl(instring, loc, doActions) + # ## Bounded Recursion algorithm ## + # Recursion only needs to be processed at ``Forward`` elements, since they are + # the only ones that can actually refer to themselves. The general idea is + # to handle recursion stepwise: We start at no recursion, then recurse once, + # recurse twice, ..., until more recursion offers no benefit (we hit the bound). + # + # The "trick" here is that each ``Forward`` gets evaluated in two contexts + # - to *match* a specific recursion level, and + # - to *search* the bounded recursion level + # and the two run concurrently. The *search* must *match* each recursion level + # to find the best possible match. This is handled by a memo table, which + # provides the previous match to the next level match attempt. + # + # See also "Left Recursion in Parsing Expression Grammars", Medeiros et al. + # + # There is a complication since we not only *parse* but also *transform* via + # actions: We do not want to run the actions too often while expanding. Thus, + # we expand using `doActions=False` and only run `doActions=True` if the next + # recursion level is acceptable. + with ParserElement.recursion_lock: + memo = ParserElement.recursion_memos + try: + # we are parsing at a specific recursion expansion - use it as-is + prev_loc, prev_result = memo[loc, self, doActions] + if isinstance(prev_result, Exception): + raise prev_result + return prev_loc, prev_result.copy() + except KeyError: + act_key = (loc, self, True) + peek_key = (loc, self, False) + # we are searching for the best recursion expansion - keep on improving + # both `doActions` cases must be tracked separately here! + prev_loc, prev_peek = memo[peek_key] = ( + loc - 1, + ParseException( + instring, loc, "Forward recursion without base case", self + ), + ) + if doActions: + memo[act_key] = memo[peek_key] + while True: + try: + new_loc, new_peek = super().parseImpl(instring, loc, False) + except ParseException: + # we failed before getting any match – do not hide the error + if isinstance(prev_peek, Exception): + raise + new_loc, new_peek = prev_loc, prev_peek + # the match did not get better: we are done + if new_loc <= prev_loc: + if doActions: + # replace the match for doActions=False as well, + # in case the action did backtrack + prev_loc, prev_result = memo[peek_key] = memo[act_key] + del memo[peek_key], memo[act_key] + return prev_loc, prev_result.copy() + del memo[peek_key] + return prev_loc, prev_peek.copy() + # the match did get better: see if we can improve further + else: + if doActions: + try: + memo[act_key] = super().parseImpl(instring, loc, True) + except ParseException as e: + memo[peek_key] = memo[act_key] = (new_loc, e) + raise + prev_loc, prev_peek = memo[peek_key] = new_loc, new_peek + + def leave_whitespace(self, recursive: bool = True) -> ParserElement: + self.skipWhitespace = False + return self + + def ignore_whitespace(self, recursive: bool = True) -> ParserElement: + self.skipWhitespace = True + return self + + def streamline(self) -> ParserElement: + if not self.streamlined: + self.streamlined = True + if self.expr is not None: + self.expr.streamline() + return self + + def validate(self, validateTrace=None) -> None: + if validateTrace is None: + validateTrace = [] + + if self not in validateTrace: + tmp = validateTrace[:] + [self] + if self.expr is not None: + self.expr.validate(tmp) + self._checkRecursion([]) + + def 
_generateDefaultName(self): + # Avoid infinite recursion by setting a temporary _defaultName + self._defaultName = ": ..." + + # Use the string representation of main expression. + retString = "..." + try: + if self.expr is not None: + retString = str(self.expr)[:1000] + else: + retString = "None" + finally: + return self.__class__.__name__ + ": " + retString + + def copy(self) -> ParserElement: + if self.expr is not None: + return super().copy() + else: + ret = Forward() + ret <<= self + return ret + + def _setResultsName(self, name, list_all_matches=False): + if ( + __diag__.warn_name_set_on_empty_Forward + and Diagnostics.warn_name_set_on_empty_Forward + not in self.suppress_warnings_ + ): + if self.expr is None: + warnings.warn( + "{}: setting results name {!r} on {} expression " + "that has no contained expression".format( + "warn_name_set_on_empty_Forward", name, type(self).__name__ + ), + stacklevel=3, + ) + + return super()._setResultsName(name, list_all_matches) + + ignoreWhitespace = ignore_whitespace + leaveWhitespace = leave_whitespace + + +class TokenConverter(ParseElementEnhance): + """ + Abstract subclass of :class:`ParseExpression`, for converting parsed results. + """ + + def __init__(self, expr: Union[ParserElement, str], savelist=False): + super().__init__(expr) # , savelist) + self.saveAsList = False + + +class Combine(TokenConverter): + """Converter to concatenate all matching tokens to a single string. + By default, the matching patterns must also be contiguous in the + input string; this can be disabled by specifying + ``'adjacent=False'`` in the constructor. + + Example:: + + real = Word(nums) + '.' + Word(nums) + print(real.parse_string('3.1416')) # -> ['3', '.', '1416'] + # will also erroneously match the following + print(real.parse_string('3. 1416')) # -> ['3', '.', '1416'] + + real = Combine(Word(nums) + '.' + Word(nums)) + print(real.parse_string('3.1416')) # -> ['3.1416'] + # no match when there are internal spaces + print(real.parse_string('3. 1416')) # -> Exception: Expected W:(0123...) + """ + + def __init__( + self, + expr: ParserElement, + join_string: str = "", + adjacent: bool = True, + *, + joinString: typing.Optional[str] = None, + ): + super().__init__(expr) + joinString = joinString if joinString is not None else join_string + # suppress whitespace-stripping in contained parse expressions, but re-enable it on the Combine itself + if adjacent: + self.leave_whitespace() + self.adjacent = adjacent + self.skipWhitespace = True + self.joinString = joinString + self.callPreparse = True + + def ignore(self, other) -> ParserElement: + if self.adjacent: + ParserElement.ignore(self, other) + else: + super().ignore(other) + return self + + def postParse(self, instring, loc, tokenlist): + retToks = tokenlist.copy() + del retToks[:] + retToks += ParseResults( + ["".join(tokenlist._asStringList(self.joinString))], modal=self.modalResults + ) + + if self.resultsName and retToks.haskeys(): + return [retToks] + else: + return retToks + + +class Group(TokenConverter): + """Converter to return the matched tokens as a list - useful for + returning tokens of :class:`ZeroOrMore` and :class:`OneOrMore` expressions. + + The optional ``aslist`` argument when set to True will return the + parsed tokens as a Python list instead of a pyparsing ParseResults. 
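+
+    A minimal sketch of the ``aslist`` behavior::
+
+        result = Group(Word(nums)[1, ...], aslist=True).parse_string("1 2 3")
+        # result[0] is a plain Python list: ['1', '2', '3']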
+ + Example:: + + ident = Word(alphas) + num = Word(nums) + term = ident | num + func = ident + Opt(delimited_list(term)) + print(func.parse_string("fn a, b, 100")) + # -> ['fn', 'a', 'b', '100'] + + func = ident + Group(Opt(delimited_list(term))) + print(func.parse_string("fn a, b, 100")) + # -> ['fn', ['a', 'b', '100']] + """ + + def __init__(self, expr: ParserElement, aslist: bool = False): + super().__init__(expr) + self.saveAsList = True + self._asPythonList = aslist + + def postParse(self, instring, loc, tokenlist): + if self._asPythonList: + return ParseResults.List( + tokenlist.asList() + if isinstance(tokenlist, ParseResults) + else list(tokenlist) + ) + else: + return [tokenlist] + + +class Dict(TokenConverter): + """Converter to return a repetitive expression as a list, but also + as a dictionary. Each element can also be referenced using the first + token in the expression as its key. Useful for tabular report + scraping when the first column can be used as a item key. + + The optional ``asdict`` argument when set to True will return the + parsed tokens as a Python dict instead of a pyparsing ParseResults. + + Example:: + + data_word = Word(alphas) + label = data_word + FollowedBy(':') + + text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + + # print attributes as plain groups + print(attr_expr[1, ...].parse_string(text).dump()) + + # instead of OneOrMore(expr), parse using Dict(Group(expr)[1, ...]) - Dict will auto-assign names + result = Dict(Group(attr_expr)[1, ...]).parse_string(text) + print(result.dump()) + + # access named fields as dict entries, or output as dict + print(result['shape']) + print(result.as_dict()) + + prints:: + + ['shape', 'SQUARE', 'posn', 'upper left', 'color', 'light blue', 'texture', 'burlap'] + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: 'light blue' + - posn: 'upper left' + - shape: 'SQUARE' + - texture: 'burlap' + SQUARE + {'color': 'light blue', 'posn': 'upper left', 'texture': 'burlap', 'shape': 'SQUARE'} + + See more examples at :class:`ParseResults` of accessing fields by results name. + """ + + def __init__(self, expr: ParserElement, asdict: bool = False): + super().__init__(expr) + self.saveAsList = True + self._asPythonDict = asdict + + def postParse(self, instring, loc, tokenlist): + for i, tok in enumerate(tokenlist): + if len(tok) == 0: + continue + + ikey = tok[0] + if isinstance(ikey, int): + ikey = str(ikey).strip() + + if len(tok) == 1: + tokenlist[ikey] = _ParseResultsWithOffset("", i) + + elif len(tok) == 2 and not isinstance(tok[1], ParseResults): + tokenlist[ikey] = _ParseResultsWithOffset(tok[1], i) + + else: + try: + dictvalue = tok.copy() # ParseResults(i) + except Exception: + exc = TypeError( + "could not extract dict values from parsed results" + " - Dict expression must contain Grouped expressions" + ) + raise exc from None + + del dictvalue[0] + + if len(dictvalue) != 1 or ( + isinstance(dictvalue, ParseResults) and dictvalue.haskeys() + ): + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue, i) + else: + tokenlist[ikey] = _ParseResultsWithOffset(dictvalue[0], i) + + if self._asPythonDict: + return [tokenlist.as_dict()] if self.resultsName else tokenlist.as_dict() + else: + return [tokenlist] if self.resultsName else tokenlist + + +class Suppress(TokenConverter): + """Converter for ignoring the results of a parsed expression. 
+
+    Example::
+
+        source = "a, b, c,d"
+        wd = Word(alphas)
+        wd_list1 = wd + (',' + wd)[...]
+        print(wd_list1.parse_string(source))
+
+        # often, delimiters that are useful during parsing are just in the
+        # way afterward - use Suppress to keep them out of the parsed output
+        wd_list2 = wd + (Suppress(',') + wd)[...]
+        print(wd_list2.parse_string(source))
+
+        # Skipped text (using '...') can be suppressed as well
+        source = "lead in START relevant text END trailing text"
+        start_marker = Keyword("START")
+        end_marker = Keyword("END")
+        find_body = Suppress(...) + start_marker + ... + end_marker
+        print(find_body.parse_string(source))
+
+    prints::
+
+        ['a', ',', 'b', ',', 'c', ',', 'd']
+        ['a', 'b', 'c', 'd']
+        ['START', 'relevant text ', 'END']
+
+    (See also :class:`delimited_list`.)
+    """
+
+    def __init__(self, expr: Union[ParserElement, str], savelist: bool = False):
+        if expr is ...:
+            expr = _PendingSkip(NoMatch())
+        super().__init__(expr)
+
+    def __add__(self, other) -> "ParserElement":
+        if isinstance(self.expr, _PendingSkip):
+            return Suppress(SkipTo(other)) + other
+        else:
+            return super().__add__(other)
+
+    def __sub__(self, other) -> "ParserElement":
+        if isinstance(self.expr, _PendingSkip):
+            return Suppress(SkipTo(other)) - other
+        else:
+            return super().__sub__(other)
+
+    def postParse(self, instring, loc, tokenlist):
+        return []
+
+    def suppress(self) -> ParserElement:
+        return self
+
+
+def trace_parse_action(f: ParseAction) -> ParseAction:
+    """Decorator for debugging parse actions.
+
+    When the parse action is called, this decorator will print
+    ``">> entering method-name(line:<current_source_line>, <parse_location>, <matched_tokens>)"``.
+    When the parse action completes, the decorator will print
+    ``"<<"`` followed by the returned value, or any exception that the parse action raised.
+
+    Example::
+
+        wd = Word(alphas)
+
+        @trace_parse_action
+        def remove_duplicate_chars(tokens):
+            return ''.join(sorted(set(''.join(tokens))))
+
+        wds = wd[1, ...].set_parse_action(remove_duplicate_chars)
+        print(wds.parse_string("slkdjs sld sldd sdlf sdljf"))
+
+    prints::
+
+        >>entering remove_duplicate_chars(line: 'slkdjs sld sldd sdlf sdljf', 0, (['slkdjs', 'sld', 'sldd', 'sdlf', 'sdljf'], {}))
+        <<leaving remove_duplicate_chars (ret: 'dfjkls')
+        ['dfjkls']
+    """
+    f = _trim_arity(f)
+
+    def z(*paArgs):
+        thisFunc = f.__name__
+        s, l, t = paArgs[-3:]
+        if len(paArgs) > 3:
+            thisFunc = paArgs[0].__class__.__name__ + "." + thisFunc
+        sys.stderr.write(
+            ">>entering {}(line: {!r}, {}, {!r})\n".format(thisFunc, line(l, s), l, t)
+        )
+        try:
+            ret = f(*paArgs)
+        except Exception as exc:
+            sys.stderr.write("<<leaving {} (exception: {})\n".format(thisFunc, exc))
+            raise
+        sys.stderr.write("<<leaving {} (ret: {!r})\n".format(thisFunc, ret))
+        return ret
+
+    return z
+
+
+# convenience constants for positional expressions
+empty = Empty().set_name("empty")
+line_start = LineStart().set_name("line_start")
+line_end = LineEnd().set_name("line_end")
+string_start = StringStart().set_name("string_start")
+string_end = StringEnd().set_name("string_end")
+
+_escapedPunc = Word(_bslash, r"\[]-*.$+^?()~ ", exact=2).set_parse_action(
+    lambda s, l, t: t[0][1]
+)
+_escapedHexChar = Regex(r"\\0?[xX][0-9a-fA-F]+").set_parse_action(
+    lambda s, l, t: chr(int(t[0].lstrip(r"\0x"), 16))
+)
+_escapedOctChar = Regex(r"\\0[0-7]+").set_parse_action(
+    lambda s, l, t: chr(int(t[0][1:], 8))
+)
+_singleChar = (
+    _escapedPunc | _escapedHexChar | _escapedOctChar | CharsNotIn(r"\]", exact=1)
+)
+_charRange = Group(_singleChar + Suppress("-") + _singleChar)
+_reBracketExpr = (
+    Literal("[")
+    + Opt("^").set_results_name("negate")
+    + Group(OneOrMore(_charRange | _singleChar)).set_results_name("body")
+    + "]"
+)
+
+
+def srange(s: str) -> str:
+    r"""Helper to easily define string ranges for use in :class:`Word`
+    construction. Borrows syntax from regexp ``'[]'`` string range
+    definitions::
+
+        srange("[0-9]") -> "0123456789"
+        srange("[a-z]") -> "abcdefghijklmnopqrstuvwxyz"
+        srange("[a-z$_]") -> "abcdefghijklmnopqrstuvwxyz$_"
+
+    The input string must be enclosed in []'s, and the returned string
+    is the expanded character set joined into a single string. The
+    values enclosed in the []'s may be:
+
+    - a single character
+    - an escaped character with a leading backslash (such as ``\-``
+      or ``\]``)
+    - an escaped hex character with a leading ``'\x'``
+      (``\x21``, which is a ``'!'`` character) (``\0x##``
+      is also supported for backwards compatibility)
+    - an escaped octal character with a leading ``'\0'``
+      (``\041``, which is a ``'!'`` character)
+    - a range of any of the above, separated by a dash (``'a-z'``,
+      etc.)
+    - any combination of the above (``'aeiouy'``,
+      ``'a-zA-Z0-9_$'``, etc.)
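+
+    For instance, ranges and escapes may be combined (illustrative)::
+
+        srange(r"[a-fA-F0-9]") -> "abcdefABCDEF0123456789"
+        srange(r"[\x61-\x66]") -> "abcdef"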
+ """ + _expanded = ( + lambda p: p + if not isinstance(p, ParseResults) + else "".join(chr(c) for c in range(ord(p[0]), ord(p[1]) + 1)) + ) + try: + return "".join(_expanded(part) for part in _reBracketExpr.parse_string(s).body) + except Exception: + return "" + + +def token_map(func, *args) -> ParseAction: + """Helper to define a parse action by mapping a function to all + elements of a :class:`ParseResults` list. If any additional args are passed, + they are forwarded to the given function as additional arguments + after the token, as in + ``hex_integer = Word(hexnums).set_parse_action(token_map(int, 16))``, + which will convert the parsed data to an integer using base 16. + + Example (compare the last to example in :class:`ParserElement.transform_string`:: + + hex_ints = Word(hexnums)[1, ...].set_parse_action(token_map(int, 16)) + hex_ints.run_tests(''' + 00 11 22 aa FF 0a 0d 1a + ''') + + upperword = Word(alphas).set_parse_action(token_map(str.upper)) + upperword[1, ...].run_tests(''' + my kingdom for a horse + ''') + + wd = Word(alphas).set_parse_action(token_map(str.title)) + wd[1, ...].set_parse_action(' '.join).run_tests(''' + now is the winter of our discontent made glorious summer by this sun of york + ''') + + prints:: + + 00 11 22 aa FF 0a 0d 1a + [0, 17, 34, 170, 255, 10, 13, 26] + + my kingdom for a horse + ['MY', 'KINGDOM', 'FOR', 'A', 'HORSE'] + + now is the winter of our discontent made glorious summer by this sun of york + ['Now Is The Winter Of Our Discontent Made Glorious Summer By This Sun Of York'] + """ + + def pa(s, l, t): + return [func(tokn, *args) for tokn in t] + + func_name = getattr(func, "__name__", getattr(func, "__class__").__name__) + pa.__name__ = func_name + + return pa + + +def autoname_elements() -> None: + """ + Utility to simplify mass-naming of parser elements, for + generating railroad diagram with named subdiagrams. 
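+
+    A minimal usage sketch (the element names are placeholders)::
+
+        integer = Word(nums)
+        identifier = Word(alphas + "_", alphanums + "_")
+        autoname_elements()   # elements are now named "integer" and "identifier"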
+ """ + for name, var in sys._getframe().f_back.f_locals.items(): + if isinstance(var, ParserElement) and not var.customName: + var.set_name(name) + + +dbl_quoted_string = Combine( + Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' +).set_name("string enclosed in double quotes") + +sgl_quoted_string = Combine( + Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" +).set_name("string enclosed in single quotes") + +quoted_string = Combine( + Regex(r'"(?:[^"\n\r\\]|(?:"")|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*') + '"' + | Regex(r"'(?:[^'\n\r\\]|(?:'')|(?:\\(?:[^x]|x[0-9a-fA-F]+)))*") + "'" +).set_name("quotedString using single or double quotes") + +unicode_string = Combine("u" + quoted_string.copy()).set_name("unicode string literal") + + +alphas8bit = srange(r"[\0xc0-\0xd6\0xd8-\0xf6\0xf8-\0xff]") +punc8bit = srange(r"[\0xa1-\0xbf\0xd7\0xf7]") + +# build list of built-in expressions, for future reference if a global default value +# gets updated +_builtin_exprs: List[ParserElement] = [ + v for v in vars().values() if isinstance(v, ParserElement) +] + +# backward compatibility names +tokenMap = token_map +conditionAsParseAction = condition_as_parse_action +nullDebugAction = null_debug_action +sglQuotedString = sgl_quoted_string +dblQuotedString = dbl_quoted_string +quotedString = quoted_string +unicodeString = unicode_string +lineStart = line_start +lineEnd = line_end +stringStart = string_start +stringEnd = string_end +traceParseAction = trace_parse_action diff --git a/src/poetry/core/_vendor/pyparsing/diagram/__init__.py b/src/poetry/core/_vendor/pyparsing/diagram/__init__.py new file mode 100644 index 0000000..8986447 --- /dev/null +++ b/src/poetry/core/_vendor/pyparsing/diagram/__init__.py @@ -0,0 +1,642 @@ +import railroad +import pyparsing +import typing +from typing import ( + List, + NamedTuple, + Generic, + TypeVar, + Dict, + Callable, + Set, + Iterable, +) +from jinja2 import Template +from io import StringIO +import inspect + + +jinja2_template_source = """\ + + + + {% if not head %} + + {% else %} + {{ head | safe }} + {% endif %} + + +{{ body | safe }} +{% for diagram in diagrams %} +
+    <div class="railroad-group">
+        <h1 class="railroad-heading">{{ diagram.title }}</h1>
+        <div class="railroad-description">{{ diagram.text }}</div>
+        <div class="railroad-svg">
+            {{ diagram.svg }}
+        </div>
+    </div>
+{% endfor %} + + +""" + +template = Template(jinja2_template_source) + +# Note: ideally this would be a dataclass, but we're supporting Python 3.5+ so we can't do this yet +NamedDiagram = NamedTuple( + "NamedDiagram", + [("name", str), ("diagram", typing.Optional[railroad.DiagramItem]), ("index", int)], +) +""" +A simple structure for associating a name with a railroad diagram +""" + +T = TypeVar("T") + + +class EachItem(railroad.Group): + """ + Custom railroad item to compose a: + - Group containing a + - OneOrMore containing a + - Choice of the elements in the Each + with the group label indicating that all must be matched + """ + + all_label = "[ALL]" + + def __init__(self, *items): + choice_item = railroad.Choice(len(items) - 1, *items) + one_or_more_item = railroad.OneOrMore(item=choice_item) + super().__init__(one_or_more_item, label=self.all_label) + + +class AnnotatedItem(railroad.Group): + """ + Simple subclass of Group that creates an annotation label + """ + + def __init__(self, label: str, item): + super().__init__(item=item, label="[{}]".format(label) if label else label) + + +class EditablePartial(Generic[T]): + """ + Acts like a functools.partial, but can be edited. In other words, it represents a type that hasn't yet been + constructed. + """ + + # We need this here because the railroad constructors actually transform the data, so can't be called until the + # entire tree is assembled + + def __init__(self, func: Callable[..., T], args: list, kwargs: dict): + self.func = func + self.args = args + self.kwargs = kwargs + + @classmethod + def from_call(cls, func: Callable[..., T], *args, **kwargs) -> "EditablePartial[T]": + """ + If you call this function in the same way that you would call the constructor, it will store the arguments + as you expect. For example EditablePartial.from_call(Fraction, 1, 3)() == Fraction(1, 3) + """ + return EditablePartial(func=func, args=list(args), kwargs=kwargs) + + @property + def name(self): + return self.kwargs["name"] + + def __call__(self) -> T: + """ + Evaluate the partial and return the result + """ + args = self.args.copy() + kwargs = self.kwargs.copy() + + # This is a helpful hack to allow you to specify varargs parameters (e.g. *args) as keyword args (e.g. 
+ # args=['list', 'of', 'things']) + arg_spec = inspect.getfullargspec(self.func) + if arg_spec.varargs in self.kwargs: + args += kwargs.pop(arg_spec.varargs) + + return self.func(*args, **kwargs) + + +def railroad_to_html(diagrams: List[NamedDiagram], **kwargs) -> str: + """ + Given a list of NamedDiagram, produce a single HTML string that visualises those diagrams + :params kwargs: kwargs to be passed in to the template + """ + data = [] + for diagram in diagrams: + if diagram.diagram is None: + continue + io = StringIO() + diagram.diagram.writeSvg(io.write) + title = diagram.name + if diagram.index == 0: + title += " (root)" + data.append({"title": title, "text": "", "svg": io.getvalue()}) + + return template.render(diagrams=data, **kwargs) + + +def resolve_partial(partial: "EditablePartial[T]") -> T: + """ + Recursively resolves a collection of Partials into whatever type they are + """ + if isinstance(partial, EditablePartial): + partial.args = resolve_partial(partial.args) + partial.kwargs = resolve_partial(partial.kwargs) + return partial() + elif isinstance(partial, list): + return [resolve_partial(x) for x in partial] + elif isinstance(partial, dict): + return {key: resolve_partial(x) for key, x in partial.items()} + else: + return partial + + +def to_railroad( + element: pyparsing.ParserElement, + diagram_kwargs: typing.Optional[dict] = None, + vertical: int = 3, + show_results_names: bool = False, + show_groups: bool = False, +) -> List[NamedDiagram]: + """ + Convert a pyparsing element tree into a list of diagrams. This is the recommended entrypoint to diagram + creation if you want to access the Railroad tree before it is converted to HTML + :param element: base element of the parser being diagrammed + :param diagram_kwargs: kwargs to pass to the Diagram() constructor + :param vertical: (optional) - int - limit at which number of alternatives should be + shown vertically instead of horizontally + :param show_results_names - bool to indicate whether results name annotations should be + included in the diagram + :param show_groups - bool to indicate whether groups should be highlighted with an unlabeled + surrounding box + """ + # Convert the whole tree underneath the root + lookup = ConverterState(diagram_kwargs=diagram_kwargs or {}) + _to_diagram_element( + element, + lookup=lookup, + parent=None, + vertical=vertical, + show_results_names=show_results_names, + show_groups=show_groups, + ) + + root_id = id(element) + # Convert the root if it hasn't been already + if root_id in lookup: + if not element.customName: + lookup[root_id].name = "" + lookup[root_id].mark_for_extraction(root_id, lookup, force=True) + + # Now that we're finished, we can convert from intermediate structures into Railroad elements + diags = list(lookup.diagrams.values()) + if len(diags) > 1: + # collapse out duplicate diags with the same name + seen = set() + deduped_diags = [] + for d in diags: + # don't extract SkipTo elements, they are uninformative as subdiagrams + if d.name == "...": + continue + if d.name is not None and d.name not in seen: + seen.add(d.name) + deduped_diags.append(d) + resolved = [resolve_partial(partial) for partial in deduped_diags] + else: + # special case - if just one diagram, always display it, even if + # it has no name + resolved = [resolve_partial(partial) for partial in diags] + return sorted(resolved, key=lambda diag: diag.index) + + +def _should_vertical( + specification: int, exprs: Iterable[pyparsing.ParserElement] +) -> bool: + """ + Returns true if we should return a 
vertical list of elements + """ + if specification is None: + return False + else: + return len(_visible_exprs(exprs)) >= specification + + +class ElementState: + """ + State recorded for an individual pyparsing Element + """ + + # Note: this should be a dataclass, but we have to support Python 3.5 + def __init__( + self, + element: pyparsing.ParserElement, + converted: EditablePartial, + parent: EditablePartial, + number: int, + name: str = None, + parent_index: typing.Optional[int] = None, + ): + #: The pyparsing element that this represents + self.element: pyparsing.ParserElement = element + #: The name of the element + self.name: typing.Optional[str] = name + #: The output Railroad element in an unconverted state + self.converted: EditablePartial = converted + #: The parent Railroad element, which we store so that we can extract this if it's duplicated + self.parent: EditablePartial = parent + #: The order in which we found this element, used for sorting diagrams if this is extracted into a diagram + self.number: int = number + #: The index of this inside its parent + self.parent_index: typing.Optional[int] = parent_index + #: If true, we should extract this out into a subdiagram + self.extract: bool = False + #: If true, all of this element's children have been filled out + self.complete: bool = False + + def mark_for_extraction( + self, el_id: int, state: "ConverterState", name: str = None, force: bool = False + ): + """ + Called when this instance has been seen twice, and thus should eventually be extracted into a sub-diagram + :param el_id: id of the element + :param state: element/diagram state tracker + :param name: name to use for this element's text + :param force: If true, force extraction now, regardless of the state of this. Only useful for extracting the + root element when we know we're finished + """ + self.extract = True + + # Set the name + if not self.name: + if name: + # Allow forcing a custom name + self.name = name + elif self.element.customName: + self.name = self.element.customName + else: + self.name = "" + + # Just because this is marked for extraction doesn't mean we can do it yet. We may have to wait for children + # to be added + # Also, if this is just a string literal etc, don't bother extracting it + if force or (self.complete and _worth_extracting(self.element)): + state.extract_into_diagram(el_id) + + +class ConverterState: + """ + Stores some state that persists between recursions into the element tree + """ + + def __init__(self, diagram_kwargs: typing.Optional[dict] = None): + #: A dictionary mapping ParserElements to state relating to them + self._element_diagram_states: Dict[int, ElementState] = {} + #: A dictionary mapping ParserElement IDs to subdiagrams generated from them + self.diagrams: Dict[int, EditablePartial[NamedDiagram]] = {} + #: The index of the next unnamed element + self.unnamed_index: int = 1 + #: The index of the next element. 
This is used for sorting + self.index: int = 0 + #: Shared kwargs that are used to customize the construction of diagrams + self.diagram_kwargs: dict = diagram_kwargs or {} + self.extracted_diagram_names: Set[str] = set() + + def __setitem__(self, key: int, value: ElementState): + self._element_diagram_states[key] = value + + def __getitem__(self, key: int) -> ElementState: + return self._element_diagram_states[key] + + def __delitem__(self, key: int): + del self._element_diagram_states[key] + + def __contains__(self, key: int): + return key in self._element_diagram_states + + def generate_unnamed(self) -> int: + """ + Generate a number used in the name of an otherwise unnamed diagram + """ + self.unnamed_index += 1 + return self.unnamed_index + + def generate_index(self) -> int: + """ + Generate a number used to index a diagram + """ + self.index += 1 + return self.index + + def extract_into_diagram(self, el_id: int): + """ + Used when we encounter the same token twice in the same tree. When this + happens, we replace all instances of that token with a terminal, and + create a new subdiagram for the token + """ + position = self[el_id] + + # Replace the original definition of this element with a regular block + if position.parent: + ret = EditablePartial.from_call(railroad.NonTerminal, text=position.name) + if "item" in position.parent.kwargs: + position.parent.kwargs["item"] = ret + elif "items" in position.parent.kwargs: + position.parent.kwargs["items"][position.parent_index] = ret + + # If the element we're extracting is a group, skip to its content but keep the title + if position.converted.func == railroad.Group: + content = position.converted.kwargs["item"] + else: + content = position.converted + + self.diagrams[el_id] = EditablePartial.from_call( + NamedDiagram, + name=position.name, + diagram=EditablePartial.from_call( + railroad.Diagram, content, **self.diagram_kwargs + ), + index=position.number, + ) + + del self[el_id] + + +def _worth_extracting(element: pyparsing.ParserElement) -> bool: + """ + Returns true if this element is worth having its own sub-diagram. 
Simply, if any of its children + themselves have children, then its complex enough to extract + """ + children = element.recurse() + return any(child.recurse() for child in children) + + +def _apply_diagram_item_enhancements(fn): + """ + decorator to ensure enhancements to a diagram item (such as results name annotations) + get applied on return from _to_diagram_element (we do this since there are several + returns in _to_diagram_element) + """ + + def _inner( + element: pyparsing.ParserElement, + parent: typing.Optional[EditablePartial], + lookup: ConverterState = None, + vertical: int = None, + index: int = 0, + name_hint: str = None, + show_results_names: bool = False, + show_groups: bool = False, + ) -> typing.Optional[EditablePartial]: + + ret = fn( + element, + parent, + lookup, + vertical, + index, + name_hint, + show_results_names, + show_groups, + ) + + # apply annotation for results name, if present + if show_results_names and ret is not None: + element_results_name = element.resultsName + if element_results_name: + # add "*" to indicate if this is a "list all results" name + element_results_name += "" if element.modalResults else "*" + ret = EditablePartial.from_call( + railroad.Group, item=ret, label=element_results_name + ) + + return ret + + return _inner + + +def _visible_exprs(exprs: Iterable[pyparsing.ParserElement]): + non_diagramming_exprs = ( + pyparsing.ParseElementEnhance, + pyparsing.PositionToken, + pyparsing.And._ErrorStop, + ) + return [ + e + for e in exprs + if not (e.customName or e.resultsName or isinstance(e, non_diagramming_exprs)) + ] + + +@_apply_diagram_item_enhancements +def _to_diagram_element( + element: pyparsing.ParserElement, + parent: typing.Optional[EditablePartial], + lookup: ConverterState = None, + vertical: int = None, + index: int = 0, + name_hint: str = None, + show_results_names: bool = False, + show_groups: bool = False, +) -> typing.Optional[EditablePartial]: + """ + Recursively converts a PyParsing Element to a railroad Element + :param lookup: The shared converter state that keeps track of useful things + :param index: The index of this element within the parent + :param parent: The parent of this element in the output tree + :param vertical: Controls at what point we make a list of elements vertical. If this is an integer (the default), + it sets the threshold of the number of items before we go vertical. 
If True, always go vertical, if False, never + do so + :param name_hint: If provided, this will override the generated name + :param show_results_names: bool flag indicating whether to add annotations for results names + :returns: The converted version of the input element, but as a Partial that hasn't yet been constructed + :param show_groups: bool flag indicating whether to show groups using bounding box + """ + exprs = element.recurse() + name = name_hint or element.customName or element.__class__.__name__ + + # Python's id() is used to provide a unique identifier for elements + el_id = id(element) + + element_results_name = element.resultsName + + # Here we basically bypass processing certain wrapper elements if they contribute nothing to the diagram + if not element.customName: + if isinstance( + element, + ( + # pyparsing.TokenConverter, + # pyparsing.Forward, + pyparsing.Located, + ), + ): + # However, if this element has a useful custom name, and its child does not, we can pass it on to the child + if exprs: + if not exprs[0].customName: + propagated_name = name + else: + propagated_name = None + + return _to_diagram_element( + element.expr, + parent=parent, + lookup=lookup, + vertical=vertical, + index=index, + name_hint=propagated_name, + show_results_names=show_results_names, + show_groups=show_groups, + ) + + # If the element isn't worth extracting, we always treat it as the first time we say it + if _worth_extracting(element): + if el_id in lookup: + # If we've seen this element exactly once before, we are only just now finding out that it's a duplicate, + # so we have to extract it into a new diagram. + looked_up = lookup[el_id] + looked_up.mark_for_extraction(el_id, lookup, name=name_hint) + ret = EditablePartial.from_call(railroad.NonTerminal, text=looked_up.name) + return ret + + elif el_id in lookup.diagrams: + # If we have seen the element at least twice before, and have already extracted it into a subdiagram, we + # just put in a marker element that refers to the sub-diagram + ret = EditablePartial.from_call( + railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] + ) + return ret + + # Recursively convert child elements + # Here we find the most relevant Railroad element for matching pyparsing Element + # We use ``items=[]`` here to hold the place for where the child elements will go once created + if isinstance(element, pyparsing.And): + # detect And's created with ``expr*N`` notation - for these use a OneOrMore with a repeat + # (all will have the same name, and resultsName) + if not exprs: + return None + if len(set((e.name, e.resultsName) for e in exprs)) == 1: + ret = EditablePartial.from_call( + railroad.OneOrMore, item="", repeat=str(len(exprs)) + ) + elif _should_vertical(vertical, exprs): + ret = EditablePartial.from_call(railroad.Stack, items=[]) + else: + ret = EditablePartial.from_call(railroad.Sequence, items=[]) + elif isinstance(element, (pyparsing.Or, pyparsing.MatchFirst)): + if not exprs: + return None + if _should_vertical(vertical, exprs): + ret = EditablePartial.from_call(railroad.Choice, 0, items=[]) + else: + ret = EditablePartial.from_call(railroad.HorizontalChoice, items=[]) + elif isinstance(element, pyparsing.Each): + if not exprs: + return None + ret = EditablePartial.from_call(EachItem, items=[]) + elif isinstance(element, pyparsing.NotAny): + ret = EditablePartial.from_call(AnnotatedItem, label="NOT", item="") + elif isinstance(element, pyparsing.FollowedBy): + ret = EditablePartial.from_call(AnnotatedItem, label="LOOKAHEAD", 
item="") + elif isinstance(element, pyparsing.PrecededBy): + ret = EditablePartial.from_call(AnnotatedItem, label="LOOKBEHIND", item="") + elif isinstance(element, pyparsing.Group): + if show_groups: + ret = EditablePartial.from_call(AnnotatedItem, label="", item="") + else: + ret = EditablePartial.from_call(railroad.Group, label="", item="") + elif isinstance(element, pyparsing.TokenConverter): + ret = EditablePartial.from_call( + AnnotatedItem, label=type(element).__name__.lower(), item="" + ) + elif isinstance(element, pyparsing.Opt): + ret = EditablePartial.from_call(railroad.Optional, item="") + elif isinstance(element, pyparsing.OneOrMore): + ret = EditablePartial.from_call(railroad.OneOrMore, item="") + elif isinstance(element, pyparsing.ZeroOrMore): + ret = EditablePartial.from_call(railroad.ZeroOrMore, item="") + elif isinstance(element, pyparsing.Group): + ret = EditablePartial.from_call( + railroad.Group, item=None, label=element_results_name + ) + elif isinstance(element, pyparsing.Empty) and not element.customName: + # Skip unnamed "Empty" elements + ret = None + elif len(exprs) > 1: + ret = EditablePartial.from_call(railroad.Sequence, items=[]) + elif len(exprs) > 0 and not element_results_name: + ret = EditablePartial.from_call(railroad.Group, item="", label=name) + else: + terminal = EditablePartial.from_call(railroad.Terminal, element.defaultName) + ret = terminal + + if ret is None: + return + + # Indicate this element's position in the tree so we can extract it if necessary + lookup[el_id] = ElementState( + element=element, + converted=ret, + parent=parent, + parent_index=index, + number=lookup.generate_index(), + ) + if element.customName: + lookup[el_id].mark_for_extraction(el_id, lookup, element.customName) + + i = 0 + for expr in exprs: + # Add a placeholder index in case we have to extract the child before we even add it to the parent + if "items" in ret.kwargs: + ret.kwargs["items"].insert(i, None) + + item = _to_diagram_element( + expr, + parent=ret, + lookup=lookup, + vertical=vertical, + index=i, + show_results_names=show_results_names, + show_groups=show_groups, + ) + + # Some elements don't need to be shown in the diagram + if item is not None: + if "item" in ret.kwargs: + ret.kwargs["item"] = item + elif "items" in ret.kwargs: + # If we've already extracted the child, don't touch this index, since it's occupied by a nonterminal + ret.kwargs["items"][i] = item + i += 1 + elif "items" in ret.kwargs: + # If we're supposed to skip this element, remove it from the parent + del ret.kwargs["items"][i] + + # If all this items children are none, skip this item + if ret and ( + ("items" in ret.kwargs and len(ret.kwargs["items"]) == 0) + or ("item" in ret.kwargs and ret.kwargs["item"] is None) + ): + ret = EditablePartial.from_call(railroad.Terminal, name) + + # Mark this element as "complete", ie it has all of its children + if el_id in lookup: + lookup[el_id].complete = True + + if el_id in lookup and lookup[el_id].extract and lookup[el_id].complete: + lookup.extract_into_diagram(el_id) + if ret is not None: + ret = EditablePartial.from_call( + railroad.NonTerminal, text=lookup.diagrams[el_id].kwargs["name"] + ) + + return ret diff --git a/src/poetry/core/_vendor/pyparsing/exceptions.py b/src/poetry/core/_vendor/pyparsing/exceptions.py new file mode 100644 index 0000000..a38447b --- /dev/null +++ b/src/poetry/core/_vendor/pyparsing/exceptions.py @@ -0,0 +1,267 @@ +# exceptions.py + +import re +import sys +import typing + +from .util import col, line, lineno, 
_collapse_string_to_ranges +from .unicode import pyparsing_unicode as ppu + + +class ExceptionWordUnicode(ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic): + pass + + +_extract_alphanums = _collapse_string_to_ranges(ExceptionWordUnicode.alphanums) +_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.") + + +class ParseBaseException(Exception): + """base exception class for all parsing runtime exceptions""" + + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( + self, + pstr: str, + loc: int = 0, + msg: typing.Optional[str] = None, + elem=None, + ): + self.loc = loc + if msg is None: + self.msg = pstr + self.pstr = "" + else: + self.msg = msg + self.pstr = pstr + self.parser_element = self.parserElement = elem + self.args = (pstr, loc, msg) + + @staticmethod + def explain_exception(exc, depth=16): + """ + Method to take an exception and translate the Python internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. + + Parameters: + + - exc - exception raised during parsing (need not be a ParseException, in support + of Python exceptions that might be raised in a parse action) + - depth (default=16) - number of levels back in the stack trace to list expression + and function names; if None, the full stack trace names will be listed; if 0, only + the failing input line, marker, and exception string will be shown + + Returns a multi-line string listing the ParserElements and/or function names in the + exception's stack trace. + """ + import inspect + from .core import ParserElement + + if depth is None: + depth = sys.getrecursionlimit() + ret = [] + if isinstance(exc, ParseBaseException): + ret.append(exc.line) + ret.append(" " * (exc.column - 1) + "^") + ret.append("{}: {}".format(type(exc).__name__, exc)) + + if depth > 0: + callers = inspect.getinnerframes(exc.__traceback__, context=depth) + seen = set() + for i, ff in enumerate(callers[-depth:]): + frm = ff[0] + + f_self = frm.f_locals.get("self", None) + if isinstance(f_self, ParserElement): + if frm.f_code.co_name not in ("parseImpl", "_parseNoCache"): + continue + if id(f_self) in seen: + continue + seen.add(id(f_self)) + + self_type = type(f_self) + ret.append( + "{}.{} - {}".format( + self_type.__module__, self_type.__name__, f_self + ) + ) + + elif f_self is not None: + self_type = type(f_self) + ret.append("{}.{}".format(self_type.__module__, self_type.__name__)) + + else: + code = frm.f_code + if code.co_name in ("wrapper", ""): + continue + + ret.append("{}".format(code.co_name)) + + depth -= 1 + if not depth: + break + + return "\n".join(ret) + + @classmethod + def _from_exception(cls, pe): + """ + internal factory method to simplify creating one type of ParseException + from another - avoids having __init__ signature conflicts among subclasses + """ + return cls(pe.pstr, pe.loc, pe.msg, pe.parserElement) + + @property + def line(self) -> str: + """ + Return the line of text where the exception occurred. + """ + return line(self.loc, self.pstr) + + @property + def lineno(self) -> int: + """ + Return the 1-based line number of text where the exception occurred. + """ + return lineno(self.loc, self.pstr) + + @property + def col(self) -> int: + """ + Return the 1-based column on the line of text where the exception occurred. 
+ """ + return col(self.loc, self.pstr) + + @property + def column(self) -> int: + """ + Return the 1-based column on the line of text where the exception occurred. + """ + return col(self.loc, self.pstr) + + def __str__(self) -> str: + if self.pstr: + if self.loc >= len(self.pstr): + foundstr = ", found end of text" + else: + # pull out next word at error location + found_match = _exception_word_extractor.match(self.pstr, self.loc) + if found_match is not None: + found = found_match.group(0) + else: + found = self.pstr[self.loc : self.loc + 1] + foundstr = (", found %r" % found).replace(r"\\", "\\") + else: + foundstr = "" + return "{}{} (at char {}), (line:{}, col:{})".format( + self.msg, foundstr, self.loc, self.lineno, self.column + ) + + def __repr__(self): + return str(self) + + def mark_input_line(self, marker_string: str = None, *, markerString=">!<") -> str: + """ + Extracts the exception line from the input string, and marks + the location of the exception with a special symbol. + """ + markerString = marker_string if marker_string is not None else markerString + line_str = self.line + line_column = self.column - 1 + if markerString: + line_str = "".join( + (line_str[:line_column], markerString, line_str[line_column:]) + ) + return line_str.strip() + + def explain(self, depth=16) -> str: + """ + Method to translate the Python internal traceback into a list + of the pyparsing expressions that caused the exception to be raised. + + Parameters: + + - depth (default=16) - number of levels back in the stack trace to list expression + and function names; if None, the full stack trace names will be listed; if 0, only + the failing input line, marker, and exception string will be shown + + Returns a multi-line string listing the ParserElements and/or function names in the + exception's stack trace. + + Example:: + + expr = pp.Word(pp.nums) * 3 + try: + expr.parse_string("123 456 A789") + except pp.ParseException as pe: + print(pe.explain(depth=0)) + + prints:: + + 123 456 A789 + ^ + ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9) + + Note: the diagnostic output will include string representations of the expressions + that failed to parse. These representations will be more helpful if you use `set_name` to + give identifiable names to your expressions. Otherwise they will use the default string + forms, which may be cryptic to read. + + Note: pyparsing's default truncation of exception tracebacks may also truncate the + stack of expressions that are displayed in the ``explain`` output. 
To get the full listing + of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True`` + """ + return self.explain_exception(self, depth) + + markInputline = mark_input_line + + +class ParseException(ParseBaseException): + """ + Exception thrown when a parse expression doesn't match the input string + + Example:: + + try: + Word(nums).set_name("integer").parse_string("ABC") + except ParseException as pe: + print(pe) + print("column: {}".format(pe.column)) + + prints:: + + Expected integer (at char 0), (line:1, col:1) + column: 1 + + """ + + +class ParseFatalException(ParseBaseException): + """ + User-throwable exception thrown when inconsistent parse content + is found; stops all parsing immediately + """ + + +class ParseSyntaxException(ParseFatalException): + """ + Just like :class:`ParseFatalException`, but thrown internally + when an :class:`ErrorStop` ('-' operator) indicates + that parsing is to stop immediately because an unbacktrackable + syntax error has been found. + """ + + +class RecursiveGrammarException(Exception): + """ + Exception thrown by :class:`ParserElement.validate` if the + grammar could be left-recursive; parser may need to enable + left recursion using :class:`ParserElement.enable_left_recursion` + """ + + def __init__(self, parseElementList): + self.parseElementTrace = parseElementList + + def __str__(self) -> str: + return "RecursiveGrammarException: {}".format(self.parseElementTrace) diff --git a/src/poetry/core/_vendor/pyparsing/helpers.py b/src/poetry/core/_vendor/pyparsing/helpers.py new file mode 100644 index 0000000..9588b3b --- /dev/null +++ b/src/poetry/core/_vendor/pyparsing/helpers.py @@ -0,0 +1,1088 @@ +# helpers.py +import html.entities +import re +import typing + +from . import __diag__ +from .core import * +from .util import _bslash, _flatten, _escape_regex_range_chars + + +# +# global helpers +# +def delimited_list( + expr: Union[str, ParserElement], + delim: Union[str, ParserElement] = ",", + combine: bool = False, + min: typing.Optional[int] = None, + max: typing.Optional[int] = None, + *, + allow_trailing_delim: bool = False, +) -> ParserElement: + """Helper to define a delimited list of expressions - the delimiter + defaults to ','. By default, the list elements and delimiters can + have intervening whitespace, and comments, but this can be + overridden by passing ``combine=True`` in the constructor. If + ``combine`` is set to ``True``, the matching tokens are + returned as a single token string, with the delimiters included; + otherwise, the matching tokens are returned as a list of tokens, + with the delimiters suppressed. + + If ``allow_trailing_delim`` is set to True, then the list may end with + a delimiter. 
+ + Example:: + + delimited_list(Word(alphas)).parse_string("aa,bb,cc") # -> ['aa', 'bb', 'cc'] + delimited_list(Word(hexnums), delim=':', combine=True).parse_string("AA:BB:CC:DD:EE") # -> ['AA:BB:CC:DD:EE'] + """ + if isinstance(expr, str_type): + expr = ParserElement._literalStringClass(expr) + + dlName = "{expr} [{delim} {expr}]...{end}".format( + expr=str(expr.copy().streamline()), + delim=str(delim), + end=" [{}]".format(str(delim)) if allow_trailing_delim else "", + ) + + if not combine: + delim = Suppress(delim) + + if min is not None: + if min < 1: + raise ValueError("min must be greater than 0") + min -= 1 + if max is not None: + if min is not None and max <= min: + raise ValueError("max must be greater than, or equal to min") + max -= 1 + delimited_list_expr = expr + (delim + expr)[min, max] + + if allow_trailing_delim: + delimited_list_expr += Opt(delim) + + if combine: + return Combine(delimited_list_expr).set_name(dlName) + else: + return delimited_list_expr.set_name(dlName) + + +def counted_array( + expr: ParserElement, + int_expr: typing.Optional[ParserElement] = None, + *, + intExpr: typing.Optional[ParserElement] = None, +) -> ParserElement: + """Helper to define a counted list of expressions. + + This helper defines a pattern of the form:: + + integer expr expr expr... + + where the leading integer tells how many expr expressions follow. + The matched tokens returns the array of expr tokens as a list - the + leading count token is suppressed. + + If ``int_expr`` is specified, it should be a pyparsing expression + that produces an integer value. + + Example:: + + counted_array(Word(alphas)).parse_string('2 ab cd ef') # -> ['ab', 'cd'] + + # in this parser, the leading integer value is given in binary, + # '10' indicating that 2 values are in the array + binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2)) + counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef') # -> ['ab', 'cd'] + + # if other fields must be parsed after the count but before the + # list items, give the fields results names and they will + # be preserved in the returned ParseResults: + count_with_metadata = integer + Word(alphas)("type") + typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items") + result = typed_array.parse_string("3 bool True True False") + print(result.dump()) + + # prints + # ['True', 'True', 'False'] + # - items: ['True', 'True', 'False'] + # - type: 'bool' + """ + intExpr = intExpr or int_expr + array_expr = Forward() + + def count_field_parse_action(s, l, t): + nonlocal array_expr + n = t[0] + array_expr <<= (expr * n) if n else Empty() + # clear list contents, but keep any named results + del t[:] + + if intExpr is None: + intExpr = Word(nums).set_parse_action(lambda t: int(t[0])) + else: + intExpr = intExpr.copy() + intExpr.set_name("arrayLen") + intExpr.add_parse_action(count_field_parse_action, call_during_try=True) + return (intExpr + array_expr).set_name("(len) " + str(expr) + "...") + + +def match_previous_literal(expr: ParserElement) -> ParserElement: + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks for + a 'repeat' of a previous expression. For example:: + + first = Word(nums) + second = match_previous_literal(first) + match_expr = first + ":" + second + + will match ``"1:1"``, but not ``"1:2"``. Because this + matches a previous literal, will also match the leading + ``"1:1"`` in ``"1:10"``. 
If this is not desired, use + :class:`match_previous_expr`. Do *not* use with packrat parsing + enabled. + """ + rep = Forward() + + def copy_token_to_repeater(s, l, t): + if t: + if len(t) == 1: + rep << t[0] + else: + # flatten t tokens + tflat = _flatten(t.as_list()) + rep << And(Literal(tt) for tt in tflat) + else: + rep << Empty() + + expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) + rep.set_name("(prev) " + str(expr)) + return rep + + +def match_previous_expr(expr: ParserElement) -> ParserElement: + """Helper to define an expression that is indirectly defined from + the tokens matched in a previous expression, that is, it looks for + a 'repeat' of a previous expression. For example:: + + first = Word(nums) + second = match_previous_expr(first) + match_expr = first + ":" + second + + will match ``"1:1"``, but not ``"1:2"``. Because this + matches by expressions, will *not* match the leading ``"1:1"`` + in ``"1:10"``; the expressions are evaluated first, and then + compared, so ``"1"`` is compared with ``"10"``. Do *not* use + with packrat parsing enabled. + """ + rep = Forward() + e2 = expr.copy() + rep <<= e2 + + def copy_token_to_repeater(s, l, t): + matchTokens = _flatten(t.as_list()) + + def must_match_these_tokens(s, l, t): + theseTokens = _flatten(t.as_list()) + if theseTokens != matchTokens: + raise ParseException( + s, l, "Expected {}, found{}".format(matchTokens, theseTokens) + ) + + rep.set_parse_action(must_match_these_tokens, callDuringTry=True) + + expr.add_parse_action(copy_token_to_repeater, callDuringTry=True) + rep.set_name("(prev) " + str(expr)) + return rep + + +def one_of( + strs: Union[typing.Iterable[str], str], + caseless: bool = False, + use_regex: bool = True, + as_keyword: bool = False, + *, + useRegex: bool = True, + asKeyword: bool = False, +) -> ParserElement: + """Helper to quickly define a set of alternative :class:`Literal` s, + and makes sure to do longest-first testing when there is a conflict, + regardless of the input order, but returns + a :class:`MatchFirst` for best performance. 
+ + Parameters: + + - ``strs`` - a string of space-delimited literals, or a collection of + string literals + - ``caseless`` - treat all literals as caseless - (default= ``False``) + - ``use_regex`` - as an optimization, will + generate a :class:`Regex` object; otherwise, will generate + a :class:`MatchFirst` object (if ``caseless=True`` or ``asKeyword=True``, or if + creating a :class:`Regex` raises an exception) - (default= ``True``) + - ``as_keyword`` - enforce :class:`Keyword`-style matching on the + generated expressions - (default= ``False``) + - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility, + but will be removed in a future release + + Example:: + + comp_oper = one_of("< = > <= >= !=") + var = Word(alphas) + number = Word(nums) + term = var | number + comparison_expr = term + comp_oper + term + print(comparison_expr.search_string("B = 12 AA=23 B<=AA AA>12")) + + prints:: + + [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']] + """ + asKeyword = asKeyword or as_keyword + useRegex = useRegex and use_regex + + if ( + isinstance(caseless, str_type) + and __diag__.warn_on_multiple_string_args_to_oneof + ): + warnings.warn( + "More than one string argument passed to one_of, pass" + " choices as a list or space-delimited string", + stacklevel=2, + ) + + if caseless: + isequal = lambda a, b: a.upper() == b.upper() + masks = lambda a, b: b.upper().startswith(a.upper()) + parseElementClass = CaselessKeyword if asKeyword else CaselessLiteral + else: + isequal = lambda a, b: a == b + masks = lambda a, b: b.startswith(a) + parseElementClass = Keyword if asKeyword else Literal + + symbols: List[str] = [] + if isinstance(strs, str_type): + symbols = strs.split() + elif isinstance(strs, Iterable): + symbols = list(strs) + else: + raise TypeError("Invalid argument to one_of, expected string or iterable") + if not symbols: + return NoMatch() + + # reorder given symbols to take care to avoid masking longer choices with shorter ones + # (but only if the given symbols are not just single characters) + if any(len(sym) > 1 for sym in symbols): + i = 0 + while i < len(symbols) - 1: + cur = symbols[i] + for j, other in enumerate(symbols[i + 1 :]): + if isequal(other, cur): + del symbols[i + j + 1] + break + elif masks(cur, other): + del symbols[i + j + 1] + symbols.insert(i, other) + break + else: + i += 1 + + if useRegex: + re_flags: int = re.IGNORECASE if caseless else 0 + + try: + if all(len(sym) == 1 for sym in symbols): + # symbols are just single characters, create range regex pattern + patt = "[{}]".format( + "".join(_escape_regex_range_chars(sym) for sym in symbols) + ) + else: + patt = "|".join(re.escape(sym) for sym in symbols) + + # wrap with \b word break markers if defining as keywords + if asKeyword: + patt = r"\b(?:{})\b".format(patt) + + ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols)) + + if caseless: + # add parse action to return symbols as specified, not in random + # casing as found in input string + symbol_map = {sym.lower(): sym for sym in symbols} + ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()]) + + return ret + + except re.error: + warnings.warn( + "Exception creating Regex for one_of, building MatchFirst", stacklevel=2 + ) + + # last resort, just use MatchFirst + return MatchFirst(parseElementClass(sym) for sym in symbols).set_name( + " | ".join(symbols) + ) + + +def dict_of(key: ParserElement, value: ParserElement) -> ParserElement: + """Helper to easily and clearly define a dictionary by specifying 
+ the respective patterns for the key and value. Takes care of + defining the :class:`Dict`, :class:`ZeroOrMore`, and + :class:`Group` tokens in the proper order. The key pattern + can include delimiting markers or punctuation, as long as they are + suppressed, thereby leaving the significant key text. The value + pattern can include named results, so that the :class:`Dict` results + can include named token fields. + + Example:: + + text = "shape: SQUARE posn: upper left color: light blue texture: burlap" + attr_expr = (label + Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)) + print(attr_expr[1, ...].parse_string(text).dump()) + + attr_label = label + attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join) + + # similar to Dict, but simpler call format + result = dict_of(attr_label, attr_value).parse_string(text) + print(result.dump()) + print(result['shape']) + print(result.shape) # object attribute access works too + print(result.as_dict()) + + prints:: + + [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']] + - color: 'light blue' + - posn: 'upper left' + - shape: 'SQUARE' + - texture: 'burlap' + SQUARE + SQUARE + {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'} + """ + return Dict(OneOrMore(Group(key + value))) + + +def original_text_for( + expr: ParserElement, as_string: bool = True, *, asString: bool = True +) -> ParserElement: + """Helper to return the original, untokenized text for a given + expression. Useful to restore the parsed fields of an HTML start + tag into the raw tag text itself, or to revert separate tokens with + intervening whitespace back to the original matching input text. By + default, returns astring containing the original parsed text. + + If the optional ``as_string`` argument is passed as + ``False``, then the return value is + a :class:`ParseResults` containing any results names that + were originally matched, and a single token containing the original + matched text from the input string. So if the expression passed to + :class:`original_text_for` contains expressions with defined + results names, you must set ``as_string`` to ``False`` if you + want to preserve those results name values. + + The ``asString`` pre-PEP8 argument is retained for compatibility, + but will be removed in a future release. + + Example:: + + src = "this is test bold text normal text " + for tag in ("b", "i"): + opener, closer = make_html_tags(tag) + patt = original_text_for(opener + SkipTo(closer) + closer) + print(patt.search_string(src)[0]) + + prints:: + + [' bold text '] + ['text'] + """ + asString = asString and as_string + + locMarker = Empty().set_parse_action(lambda s, loc, t: loc) + endlocMarker = locMarker.copy() + endlocMarker.callPreparse = False + matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end") + if asString: + extractText = lambda s, l, t: s[t._original_start : t._original_end] + else: + + def extractText(s, l, t): + t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]] + + matchExpr.set_parse_action(extractText) + matchExpr.ignoreExprs = expr.ignoreExprs + matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection) + return matchExpr + + +def ungroup(expr: ParserElement) -> ParserElement: + """Helper to undo pyparsing's default grouping of And expressions, + even if all but one are non-empty. 
+ """ + return TokenConverter(expr).add_parse_action(lambda t: t[0]) + + +def locatedExpr(expr: ParserElement) -> ParserElement: + """ + (DEPRECATED - future code should use the Located class) + Helper to decorate a returned token with its starting and ending + locations in the input string. + + This helper adds the following results names: + + - ``locn_start`` - location where matched expression begins + - ``locn_end`` - location where matched expression ends + - ``value`` - the actual parsed results + + Be careful if the input text contains ```` characters, you + may want to call :class:`ParserElement.parseWithTabs` + + Example:: + + wd = Word(alphas) + for match in locatedExpr(wd).searchString("ljsdf123lksdjjf123lkkjj1222"): + print(match) + + prints:: + + [[0, 'ljsdf', 5]] + [[8, 'lksdjjf', 15]] + [[18, 'lkkjj', 23]] + """ + locator = Empty().set_parse_action(lambda ss, ll, tt: ll) + return Group( + locator("locn_start") + + expr("value") + + locator.copy().leaveWhitespace()("locn_end") + ) + + +def nested_expr( + opener: Union[str, ParserElement] = "(", + closer: Union[str, ParserElement] = ")", + content: typing.Optional[ParserElement] = None, + ignore_expr: ParserElement = quoted_string(), + *, + ignoreExpr: ParserElement = quoted_string(), +) -> ParserElement: + """Helper method for defining nested lists enclosed in opening and + closing delimiters (``"("`` and ``")"`` are the default). + + Parameters: + - ``opener`` - opening character for a nested list + (default= ``"("``); can also be a pyparsing expression + - ``closer`` - closing character for a nested list + (default= ``")"``); can also be a pyparsing expression + - ``content`` - expression for items within the nested lists + (default= ``None``) + - ``ignore_expr`` - expression for ignoring opening and closing delimiters + (default= :class:`quoted_string`) + - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility + but will be removed in a future release + + If an expression is not provided for the content argument, the + nested expression will capture all whitespace-delimited content + between delimiters as a list of separate values. + + Use the ``ignore_expr`` argument to define expressions that may + contain opening or closing characters that should not be treated as + opening or closing characters for nesting, such as quoted_string or + a comment expression. Specify multiple expressions using an + :class:`Or` or :class:`MatchFirst`. The default is + :class:`quoted_string`, but if no expressions are to be ignored, then + pass ``None`` for this argument. 
+ + Example:: + + data_type = one_of("void int short long char float double") + decl_data_type = Combine(data_type + Opt(Word('*'))) + ident = Word(alphas+'_', alphanums+'_') + number = pyparsing_common.number + arg = Group(decl_data_type + ident) + LPAR, RPAR = map(Suppress, "()") + + code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment)) + + c_function = (decl_data_type("type") + + ident("name") + + LPAR + Opt(delimited_list(arg), [])("args") + RPAR + + code_body("body")) + c_function.ignore(c_style_comment) + + source_code = ''' + int is_odd(int x) { + return (x%2); + } + + int dec_to_hex(char hchar) { + if (hchar >= '0' && hchar <= '9') { + return (ord(hchar)-ord('0')); + } else { + return (10+ord(hchar)-ord('A')); + } + } + ''' + for func in c_function.search_string(source_code): + print("%(name)s (%(type)s) args: %(args)s" % func) + + + prints:: + + is_odd (int) args: [['int', 'x']] + dec_to_hex (int) args: [['char', 'hchar']] + """ + if ignoreExpr != ignore_expr: + ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr + if opener == closer: + raise ValueError("opening and closing strings cannot be the same") + if content is None: + if isinstance(opener, str_type) and isinstance(closer, str_type): + if len(opener) == 1 and len(closer) == 1: + if ignoreExpr is not None: + content = Combine( + OneOrMore( + ~ignoreExpr + + CharsNotIn( + opener + closer + ParserElement.DEFAULT_WHITE_CHARS, + exact=1, + ) + ) + ).set_parse_action(lambda t: t[0].strip()) + else: + content = empty.copy() + CharsNotIn( + opener + closer + ParserElement.DEFAULT_WHITE_CHARS + ).set_parse_action(lambda t: t[0].strip()) + else: + if ignoreExpr is not None: + content = Combine( + OneOrMore( + ~ignoreExpr + + ~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) + ) + ).set_parse_action(lambda t: t[0].strip()) + else: + content = Combine( + OneOrMore( + ~Literal(opener) + + ~Literal(closer) + + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1) + ) + ).set_parse_action(lambda t: t[0].strip()) + else: + raise ValueError( + "opening and closing arguments must be strings if no content expression is given" + ) + ret = Forward() + if ignoreExpr is not None: + ret <<= Group( + Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer) + ) + else: + ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer)) + ret.set_name("nested %s%s expression" % (opener, closer)) + return ret + + +def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")): + """Internal helper to construct opening and closing tag expressions, given a tag name""" + if isinstance(tagStr, str_type): + resname = tagStr + tagStr = Keyword(tagStr, caseless=not xml) + else: + resname = tagStr.name + + tagAttrName = Word(alphas, alphanums + "_-:") + if xml: + tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes) + openTag = ( + suppress_LT + + tagStr("tag") + + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue))) + + Opt("/", default=[False])("empty").set_parse_action( + lambda s, l, t: t[0] == "/" + ) + + suppress_GT + ) + else: + tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word( + printables, exclude_chars=">" + ) + openTag = ( + suppress_LT + + tagStr("tag") + + Dict( + ZeroOrMore( + Group( + tagAttrName.set_parse_action(lambda t: t[0].lower()) + + Opt(Suppress("=") + tagAttrValue) + ) + ) + ) + + Opt("/", 
default=[False])("empty").set_parse_action( + lambda s, l, t: t[0] == "/" + ) + + suppress_GT + ) + closeTag = Combine(Literal("", adjacent=False) + + openTag.set_name("<%s>" % resname) + # add start results name in parse action now that ungrouped names are not reported at two levels + openTag.add_parse_action( + lambda t: t.__setitem__( + "start" + "".join(resname.replace(":", " ").title().split()), t.copy() + ) + ) + closeTag = closeTag( + "end" + "".join(resname.replace(":", " ").title().split()) + ).set_name("" % resname) + openTag.tag = resname + closeTag.tag = resname + openTag.tag_body = SkipTo(closeTag()) + return openTag, closeTag + + +def make_html_tags( + tag_str: Union[str, ParserElement] +) -> Tuple[ParserElement, ParserElement]: + """Helper to construct opening and closing tag expressions for HTML, + given a tag name. Matches tags in either upper or lower case, + attributes with namespaces and with quoted or unquoted values. + + Example:: + + text = 'More info at the pyparsing wiki page' + # make_html_tags returns pyparsing expressions for the opening and + # closing tags as a 2-tuple + a, a_end = make_html_tags("A") + link_expr = a + SkipTo(a_end)("link_text") + a_end + + for link in link_expr.search_string(text): + # attributes in the tag (like "href" shown here) are + # also accessible as named results + print(link.link_text, '->', link.href) + + prints:: + + pyparsing -> https://github.com/pyparsing/pyparsing/wiki + """ + return _makeTags(tag_str, False) + + +def make_xml_tags( + tag_str: Union[str, ParserElement] +) -> Tuple[ParserElement, ParserElement]: + """Helper to construct opening and closing tag expressions for XML, + given a tag name. Matches tags only in the given upper/lower case. + + Example: similar to :class:`make_html_tags` + """ + return _makeTags(tag_str, True) + + +any_open_tag: ParserElement +any_close_tag: ParserElement +any_open_tag, any_close_tag = make_html_tags( + Word(alphas, alphanums + "_:").set_name("any tag") +) + +_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()} +common_html_entity = Regex("&(?P" + "|".join(_htmlEntityMap) + ");").set_name( + "common HTML entity" +) + + +def replace_html_entity(t): + """Helper parser action to replace common HTML entities with their special characters""" + return _htmlEntityMap.get(t.entity) + + +class OpAssoc(Enum): + LEFT = 1 + RIGHT = 2 + + +InfixNotationOperatorArgType = Union[ + ParserElement, str, Tuple[Union[ParserElement, str], Union[ParserElement, str]] +] +InfixNotationOperatorSpec = Union[ + Tuple[ + InfixNotationOperatorArgType, + int, + OpAssoc, + typing.Optional[ParseAction], + ], + Tuple[ + InfixNotationOperatorArgType, + int, + OpAssoc, + ], +] + + +def infix_notation( + base_expr: ParserElement, + op_list: List[InfixNotationOperatorSpec], + lpar: Union[str, ParserElement] = Suppress("("), + rpar: Union[str, ParserElement] = Suppress(")"), +) -> ParserElement: + """Helper method for constructing grammars of expressions made up of + operators working in a precedence hierarchy. Operators may be unary + or binary, left- or right-associative. Parse actions can also be + attached to operator expressions. The generated parser will also + recognize the use of parentheses to override operator precedences + (see example below). + + Note: if you define a deep operator list, you may see performance + issues when using infix_notation. See + :class:`ParserElement.enable_packrat` for a mechanism to potentially + improve your parser performance. 
+ + Parameters: + - ``base_expr`` - expression representing the most basic operand to + be used in the expression + - ``op_list`` - list of tuples, one for each operator precedence level + in the expression grammar; each tuple is of the form ``(op_expr, + num_operands, right_left_assoc, (optional)parse_action)``, where: + + - ``op_expr`` is the pyparsing expression for the operator; may also + be a string, which will be converted to a Literal; if ``num_operands`` + is 3, ``op_expr`` is a tuple of two expressions, for the two + operators separating the 3 terms + - ``num_operands`` is the number of terms for this operator (must be 1, + 2, or 3) + - ``right_left_assoc`` is the indicator whether the operator is right + or left associative, using the pyparsing-defined constants + ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``. + - ``parse_action`` is the parse action to be associated with + expressions matching this operator expression (the parse action + tuple member may be omitted); if the parse action is passed + a tuple or list of functions, this is equivalent to calling + ``set_parse_action(*fn)`` + (:class:`ParserElement.set_parse_action`) + - ``lpar`` - expression for matching left-parentheses; if passed as a + str, then will be parsed as Suppress(lpar). If lpar is passed as + an expression (such as ``Literal('(')``), then it will be kept in + the parsed results, and grouped with them. (default= ``Suppress('(')``) + - ``rpar`` - expression for matching right-parentheses; if passed as a + str, then will be parsed as Suppress(rpar). If rpar is passed as + an expression (such as ``Literal(')')``), then it will be kept in + the parsed results, and grouped with them. (default= ``Suppress(')')``) + + Example:: + + # simple example of four-function arithmetic with ints and + # variable names + integer = pyparsing_common.signed_integer + varname = pyparsing_common.identifier + + arith_expr = infix_notation(integer | varname, + [ + ('-', 1, OpAssoc.RIGHT), + (one_of('* /'), 2, OpAssoc.LEFT), + (one_of('+ -'), 2, OpAssoc.LEFT), + ]) + + arith_expr.run_tests(''' + 5+3*6 + (5+3)*6 + -2--11 + ''', full_dump=False) + + prints:: + + 5+3*6 + [[5, '+', [3, '*', 6]]] + + (5+3)*6 + [[[5, '+', 3], '*', 6]] + + -2--11 + [[['-', 2], '-', ['-', 11]]] + """ + # captive version of FollowedBy that does not do parse actions or capture results names + class _FB(FollowedBy): + def parseImpl(self, instring, loc, doActions=True): + self.expr.try_parse(instring, loc) + return loc, [] + + _FB.__name__ = "FollowedBy>" + + ret = Forward() + if isinstance(lpar, str): + lpar = Suppress(lpar) + if isinstance(rpar, str): + rpar = Suppress(rpar) + + # if lpar and rpar are not suppressed, wrap in group + if not (isinstance(rpar, Suppress) and isinstance(rpar, Suppress)): + lastExpr = base_expr | Group(lpar + ret + rpar) + else: + lastExpr = base_expr | (lpar + ret + rpar) + + for i, operDef in enumerate(op_list): + opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4] + if isinstance(opExpr, str_type): + opExpr = ParserElement._literalStringClass(opExpr) + if arity == 3: + if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2: + raise ValueError( + "if numterms=3, opExpr must be a tuple or list of two expressions" + ) + opExpr1, opExpr2 = opExpr + term_name = "{}{} term".format(opExpr1, opExpr2) + else: + term_name = "{} term".format(opExpr) + + if not 1 <= arity <= 3: + raise ValueError("operator must be unary (1), binary (2), or ternary (3)") + + if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT): + raise 
ValueError("operator must indicate right or left associativity") + + thisExpr: Forward = Forward().set_name(term_name) + if rightLeftAssoc is OpAssoc.LEFT: + if arity == 1: + matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...]) + elif arity == 2: + if opExpr is not None: + matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group( + lastExpr + (opExpr + lastExpr)[1, ...] + ) + else: + matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...]) + elif arity == 3: + matchExpr = _FB( + lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr + ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr)) + elif rightLeftAssoc is OpAssoc.RIGHT: + if arity == 1: + # try to avoid LR with this extra test + if not isinstance(opExpr, Opt): + opExpr = Opt(opExpr) + matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr) + elif arity == 2: + if opExpr is not None: + matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group( + lastExpr + (opExpr + thisExpr)[1, ...] + ) + else: + matchExpr = _FB(lastExpr + thisExpr) + Group( + lastExpr + thisExpr[1, ...] + ) + elif arity == 3: + matchExpr = _FB( + lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr + ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr) + if pa: + if isinstance(pa, (tuple, list)): + matchExpr.set_parse_action(*pa) + else: + matchExpr.set_parse_action(pa) + thisExpr <<= (matchExpr | lastExpr).setName(term_name) + lastExpr = thisExpr + ret <<= lastExpr + return ret + + +def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]): + """ + (DEPRECATED - use IndentedBlock class instead) + Helper method for defining space-delimited indentation blocks, + such as those used to define block statements in Python source code. + + Parameters: + + - ``blockStatementExpr`` - expression defining syntax of statement that + is repeated within the indented block + - ``indentStack`` - list created by caller to manage indentation stack + (multiple ``statementWithIndentedBlock`` expressions within a single + grammar should share a common ``indentStack``) + - ``indent`` - boolean indicating whether block must be indented beyond + the current level; set to ``False`` for block of left-most statements + (default= ``True``) + + A valid block must contain at least one ``blockStatement``. + + (Note that indentedBlock uses internal parse actions which make it + incompatible with packrat parsing.) + + Example:: + + data = ''' + def A(z): + A1 + B = 100 + G = A2 + A2 + A3 + B + def BB(a,b,c): + BB1 + def BBA(): + bba1 + bba2 + bba3 + C + D + def spam(x,y): + def eggs(z): + pass + ''' + + + indentStack = [1] + stmt = Forward() + + identifier = Word(alphas, alphanums) + funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":") + func_body = indentedBlock(stmt, indentStack) + funcDef = Group(funcDecl + func_body) + + rvalue = Forward() + funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")") + rvalue << (funcCall | identifier | Word(nums)) + assignment = Group(identifier + "=" + rvalue) + stmt << (funcDef | assignment | identifier) + + module_body = stmt[1, ...] 
+
+        parseTree = module_body.parseString(data)
+        parseTree.pprint()
+
+    prints::
+
+        [['def',
+          'A',
+          ['(', 'z', ')'],
+          ':',
+          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
+         'B',
+         ['def',
+          'BB',
+          ['(', 'a', 'b', 'c', ')'],
+          ':',
+          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
+         'C',
+         'D',
+         ['def',
+          'spam',
+          ['(', 'x', 'y', ')'],
+          ':',
+          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
+    """
+    backup_stacks.append(indentStack[:])
+
+    def reset_stack():
+        indentStack[:] = backup_stacks[-1]
+
+    def checkPeerIndent(s, l, t):
+        if l >= len(s):
+            return
+        curCol = col(l, s)
+        if curCol != indentStack[-1]:
+            if curCol > indentStack[-1]:
+                raise ParseException(s, l, "illegal nesting")
+            raise ParseException(s, l, "not a peer entry")
+
+    def checkSubIndent(s, l, t):
+        curCol = col(l, s)
+        if curCol > indentStack[-1]:
+            indentStack.append(curCol)
+        else:
+            raise ParseException(s, l, "not a subentry")
+
+    def checkUnindent(s, l, t):
+        if l >= len(s):
+            return
+        curCol = col(l, s)
+        if not (indentStack and curCol in indentStack):
+            raise ParseException(s, l, "not an unindent")
+        if curCol < indentStack[-1]:
+            indentStack.pop()
+
+    NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress())
+    INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT")
+    PEER = Empty().set_parse_action(checkPeerIndent).set_name("")
+    UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT")
+    if indent:
+        smExpr = Group(
+            Opt(NL)
+            + INDENT
+            + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
+            + UNDENT
+        )
+    else:
+        smExpr = Group(
+            Opt(NL)
+            + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
+            + Opt(UNDENT)
+        )
+
+    # add a parse action to remove backup_stack from list of backups
+    smExpr.add_parse_action(
+        lambda: backup_stacks.pop(-1) and None if backup_stacks else None
+    )
+    smExpr.set_fail_action(lambda a, b, c, d: reset_stack())
+    blockStatementExpr.ignore(_bslash + LineEnd())
+    return smExpr.set_name("indented block")
+
+
+# it's easy to get these comment structures wrong - they're very common, so may as well make them available
+c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name(
+    "C style comment"
+)
+"Comment of the form ``/* ... */``"
+
+html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment")
+"Comment of the form ``<!-- ... -->``"
+
+rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line")
+dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment")
+"Comment of the form ``// ... (to end of line)``"
+
+cpp_style_comment = Combine(
+    Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment
+).set_name("C++ style comment")
+"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`"
+
+java_style_comment = cpp_style_comment
+"Same as :class:`cpp_style_comment`"
+
+python_style_comment = Regex(r"#.*").set_name("Python style comment")
+"Comment of the form ``# ...
(to end of line)``" + + +# build list of built-in expressions, for future reference if a global default value +# gets updated +_builtin_exprs: List[ParserElement] = [ + v for v in vars().values() if isinstance(v, ParserElement) +] + + +# pre-PEP8 compatible names +delimitedList = delimited_list +countedArray = counted_array +matchPreviousLiteral = match_previous_literal +matchPreviousExpr = match_previous_expr +oneOf = one_of +dictOf = dict_of +originalTextFor = original_text_for +nestedExpr = nested_expr +makeHTMLTags = make_html_tags +makeXMLTags = make_xml_tags +anyOpenTag, anyCloseTag = any_open_tag, any_close_tag +commonHTMLEntity = common_html_entity +replaceHTMLEntity = replace_html_entity +opAssoc = OpAssoc +infixNotation = infix_notation +cStyleComment = c_style_comment +htmlComment = html_comment +restOfLine = rest_of_line +dblSlashComment = dbl_slash_comment +cppStyleComment = cpp_style_comment +javaStyleComment = java_style_comment +pythonStyleComment = python_style_comment diff --git a/src/poetry/core/_vendor/pyparsing/py.typed b/src/poetry/core/_vendor/pyparsing/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/_vendor/pyparsing/results.py b/src/poetry/core/_vendor/pyparsing/results.py new file mode 100644 index 0000000..00c9421 --- /dev/null +++ b/src/poetry/core/_vendor/pyparsing/results.py @@ -0,0 +1,760 @@ +# results.py +from collections.abc import MutableMapping, Mapping, MutableSequence, Iterator +import pprint +from weakref import ref as wkref +from typing import Tuple, Any + +str_type: Tuple[type, ...] = (str, bytes) +_generator_type = type((_ for _ in ())) + + +class _ParseResultsWithOffset: + __slots__ = ["tup"] + + def __init__(self, p1, p2): + self.tup = (p1, p2) + + def __getitem__(self, i): + return self.tup[i] + + def __getstate__(self): + return self.tup + + def __setstate__(self, *args): + self.tup = args[0] + + +class ParseResults: + """Structured parse results, to provide multiple means of access to + the parsed data: + + - as a list (``len(results)``) + - by list index (``results[0], results[1]``, etc.) + - by attribute (``results.`` - see :class:`ParserElement.set_results_name`) + + Example:: + + integer = Word(nums) + date_str = (integer.set_results_name("year") + '/' + + integer.set_results_name("month") + '/' + + integer.set_results_name("day")) + # equivalent form: + # date_str = (integer("year") + '/' + # + integer("month") + '/' + # + integer("day")) + + # parse_string returns a ParseResults object + result = date_str.parse_string("1999/12/31") + + def test(s, fn=repr): + print("{} -> {}".format(s, fn(eval(s)))) + test("list(result)") + test("result[0]") + test("result['month']") + test("result.day") + test("'month' in result") + test("'minutes' in result") + test("result.dump()", str) + + prints:: + + list(result) -> ['1999', '/', '12', '/', '31'] + result[0] -> '1999' + result['month'] -> '12' + result.day -> '31' + 'month' in result -> True + 'minutes' in result -> False + result.dump() -> ['1999', '/', '12', '/', '31'] + - day: '31' + - month: '12' + - year: '1999' + """ + + _null_values: Tuple[Any, ...] 
= (None, [], "", ()) + + __slots__ = [ + "_name", + "_parent", + "_all_names", + "_modal", + "_toklist", + "_tokdict", + "__weakref__", + ] + + class List(list): + """ + Simple wrapper class to distinguish parsed list results that should be preserved + as actual Python lists, instead of being converted to :class:`ParseResults`: + + LBRACK, RBRACK = map(pp.Suppress, "[]") + element = pp.Forward() + item = ppc.integer + element_list = LBRACK + pp.delimited_list(element) + RBRACK + + # add parse actions to convert from ParseResults to actual Python collection types + def as_python_list(t): + return pp.ParseResults.List(t.as_list()) + element_list.add_parse_action(as_python_list) + + element <<= item | element_list + + element.run_tests(''' + 100 + [2,3,4] + [[2, 1],3,4] + [(2, 1),3,4] + (2,3,4) + ''', post_parse=lambda s, r: (r[0], type(r[0]))) + + prints: + + 100 + (100, ) + + [2,3,4] + ([2, 3, 4], ) + + [[2, 1],3,4] + ([[2, 1], 3, 4], ) + + (Used internally by :class:`Group` when `aslist=True`.) + """ + + def __new__(cls, contained=None): + if contained is None: + contained = [] + + if not isinstance(contained, list): + raise TypeError( + "{} may only be constructed with a list," + " not {}".format(cls.__name__, type(contained).__name__) + ) + + return list.__new__(cls) + + def __new__(cls, toklist=None, name=None, **kwargs): + if isinstance(toklist, ParseResults): + return toklist + self = object.__new__(cls) + self._name = None + self._parent = None + self._all_names = set() + + if toklist is None: + self._toklist = [] + elif isinstance(toklist, (list, _generator_type)): + self._toklist = ( + [toklist[:]] + if isinstance(toklist, ParseResults.List) + else list(toklist) + ) + else: + self._toklist = [toklist] + self._tokdict = dict() + return self + + # Performance tuning: we construct a *lot* of these, so keep this + # constructor as small and fast as possible + def __init__( + self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance + ): + self._modal = modal + if name is not None and name != "": + if isinstance(name, int): + name = str(name) + if not modal: + self._all_names = {name} + self._name = name + if toklist not in self._null_values: + if isinstance(toklist, (str_type, type)): + toklist = [toklist] + if asList: + if isinstance(toklist, ParseResults): + self[name] = _ParseResultsWithOffset( + ParseResults(toklist._toklist), 0 + ) + else: + self[name] = _ParseResultsWithOffset( + ParseResults(toklist[0]), 0 + ) + self[name]._name = name + else: + try: + self[name] = toklist[0] + except (KeyError, TypeError, IndexError): + if toklist is not self: + self[name] = toklist + else: + self._name = name + + def __getitem__(self, i): + if isinstance(i, (int, slice)): + return self._toklist[i] + else: + if i not in self._all_names: + return self._tokdict[i][-1][0] + else: + return ParseResults([v[0] for v in self._tokdict[i]]) + + def __setitem__(self, k, v, isinstance=isinstance): + if isinstance(v, _ParseResultsWithOffset): + self._tokdict[k] = self._tokdict.get(k, list()) + [v] + sub = v[0] + elif isinstance(k, (int, slice)): + self._toklist[k] = v + sub = v + else: + self._tokdict[k] = self._tokdict.get(k, list()) + [ + _ParseResultsWithOffset(v, 0) + ] + sub = v + if isinstance(sub, ParseResults): + sub._parent = wkref(self) + + def __delitem__(self, i): + if isinstance(i, (int, slice)): + mylen = len(self._toklist) + del self._toklist[i] + + # convert int to slice + if isinstance(i, int): + if i < 0: + i += mylen + i = slice(i, i + 1) + # get removed indices + 
removed = list(range(*i.indices(mylen))) + removed.reverse() + # fixup indices in token dictionary + for name, occurrences in self._tokdict.items(): + for j in removed: + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset( + value, position - (position > j) + ) + else: + del self._tokdict[i] + + def __contains__(self, k) -> bool: + return k in self._tokdict + + def __len__(self) -> int: + return len(self._toklist) + + def __bool__(self) -> bool: + return not not (self._toklist or self._tokdict) + + def __iter__(self) -> Iterator: + return iter(self._toklist) + + def __reversed__(self) -> Iterator: + return iter(self._toklist[::-1]) + + def keys(self): + return iter(self._tokdict) + + def values(self): + return (self[k] for k in self.keys()) + + def items(self): + return ((k, self[k]) for k in self.keys()) + + def haskeys(self) -> bool: + """ + Since ``keys()`` returns an iterator, this method is helpful in bypassing + code that looks for the existence of any defined results names.""" + return bool(self._tokdict) + + def pop(self, *args, **kwargs): + """ + Removes and returns item at specified index (default= ``last``). + Supports both ``list`` and ``dict`` semantics for ``pop()``. If + passed no argument or an integer argument, it will use ``list`` + semantics and pop tokens from the list of parsed tokens. If passed + a non-integer argument (most likely a string), it will use ``dict`` + semantics and pop the corresponding value from any defined results + names. A second default return value argument is supported, just as in + ``dict.pop()``. + + Example:: + + numlist = Word(nums)[...] + print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] + + def remove_first(tokens): + tokens.pop(0) + numlist.add_parse_action(remove_first) + print(numlist.parse_string("0 123 321")) # -> ['123', '321'] + + label = Word(alphas) + patt = label("LABEL") + Word(nums)[1, ...] + print(patt.parse_string("AAB 123 321").dump()) + + # Use pop() in a parse action to remove named result (note that corresponding value is not + # removed from list form of results) + def remove_LABEL(tokens): + tokens.pop("LABEL") + return tokens + patt.add_parse_action(remove_LABEL) + print(patt.parse_string("AAB 123 321").dump()) + + prints:: + + ['AAB', '123', '321'] + - LABEL: 'AAB' + + ['AAB', '123', '321'] + """ + if not args: + args = [-1] + for k, v in kwargs.items(): + if k == "default": + args = (args[0], v) + else: + raise TypeError( + "pop() got an unexpected keyword argument {!r}".format(k) + ) + if isinstance(args[0], int) or len(args) == 1 or args[0] in self: + index = args[0] + ret = self[index] + del self[index] + return ret + else: + defaultvalue = args[1] + return defaultvalue + + def get(self, key, default_value=None): + """ + Returns named result matching the given key, or if there is no + such name, then returns the given ``default_value`` or ``None`` if no + ``default_value`` is specified. + + Similar to ``dict.get()``. + + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parse_string("1999/12/31") + print(result.get("year")) # -> '1999' + print(result.get("hour", "not specified")) # -> 'not specified' + print(result.get("hour")) # -> None + """ + if key in self: + return self[key] + else: + return default_value + + def insert(self, index, ins_string): + """ + Inserts new element at location index in the list of parsed tokens. + + Similar to ``list.insert()``. 
+ + Example:: + + numlist = Word(nums)[...] + print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to insert the parse location in the front of the parsed results + def insert_locn(locn, tokens): + tokens.insert(0, locn) + numlist.add_parse_action(insert_locn) + print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321'] + """ + self._toklist.insert(index, ins_string) + # fixup indices in token dictionary + for name, occurrences in self._tokdict.items(): + for k, (value, position) in enumerate(occurrences): + occurrences[k] = _ParseResultsWithOffset( + value, position + (position > index) + ) + + def append(self, item): + """ + Add single element to end of ``ParseResults`` list of elements. + + Example:: + + numlist = Word(nums)[...] + print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321'] + + # use a parse action to compute the sum of the parsed integers, and add it to the end + def append_sum(tokens): + tokens.append(sum(map(int, tokens))) + numlist.add_parse_action(append_sum) + print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444] + """ + self._toklist.append(item) + + def extend(self, itemseq): + """ + Add sequence of elements to end of ``ParseResults`` list of elements. + + Example:: + + patt = Word(alphas)[1, ...] + + # use a parse action to append the reverse of the matched strings, to make a palindrome + def make_palindrome(tokens): + tokens.extend(reversed([t[::-1] for t in tokens])) + return ''.join(tokens) + patt.add_parse_action(make_palindrome) + print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl' + """ + if isinstance(itemseq, ParseResults): + self.__iadd__(itemseq) + else: + self._toklist.extend(itemseq) + + def clear(self): + """ + Clear all elements and results names. + """ + del self._toklist[:] + self._tokdict.clear() + + def __getattr__(self, name): + try: + return self[name] + except KeyError: + if name.startswith("__"): + raise AttributeError(name) + return "" + + def __add__(self, other) -> "ParseResults": + ret = self.copy() + ret += other + return ret + + def __iadd__(self, other) -> "ParseResults": + if other._tokdict: + offset = len(self._toklist) + addoffset = lambda a: offset if a < 0 else a + offset + otheritems = other._tokdict.items() + otherdictitems = [ + (k, _ParseResultsWithOffset(v[0], addoffset(v[1]))) + for k, vlist in otheritems + for v in vlist + ] + for k, v in otherdictitems: + self[k] = v + if isinstance(v[0], ParseResults): + v[0]._parent = wkref(self) + + self._toklist += other._toklist + self._all_names |= other._all_names + return self + + def __radd__(self, other) -> "ParseResults": + if isinstance(other, int) and other == 0: + # useful for merging many ParseResults using sum() builtin + return self.copy() + else: + # this may raise a TypeError - so be it + return other + self + + def __repr__(self) -> str: + return "{}({!r}, {})".format(type(self).__name__, self._toklist, self.as_dict()) + + def __str__(self) -> str: + return ( + "[" + + ", ".join( + [ + str(i) if isinstance(i, ParseResults) else repr(i) + for i in self._toklist + ] + ) + + "]" + ) + + def _asStringList(self, sep=""): + out = [] + for item in self._toklist: + if out and sep: + out.append(sep) + if isinstance(item, ParseResults): + out += item._asStringList() + else: + out.append(str(item)) + return out + + def as_list(self) -> list: + """ + Returns the parse results as a nested list of matching tokens, all converted to strings. 
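[Editorial note: the ``__add__``/``__iadd__``/``__radd__`` support shown above is what lets the ``sum()`` builtin merge many ``ParseResults``. A minimal usage sketch, illustrative only and not part of the vendored file; it assumes an installed upstream pyparsing 3.x rather than the vendored copy::

    import pyparsing as pp

    word = pp.Word(pp.alphas)
    results = [word.parse_string(s) for s in ("alpha", "beta", "gamma")]

    # sum() starts from 0; __radd__ turns 0 + ParseResults into a copy,
    # then __iadd__ concatenates the token lists and merges results names
    merged = sum(results)
    print(merged.as_list())   # -> ['alpha', 'beta', 'gamma']
]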
+ + Example:: + + patt = Word(alphas)[1, ...] + result = patt.parse_string("sldkj lsdkj sldkj") + # even though the result prints in string-like form, it is actually a pyparsing ParseResults + print(type(result), result) # -> ['sldkj', 'lsdkj', 'sldkj'] + + # Use as_list() to create an actual list + result_list = result.as_list() + print(type(result_list), result_list) # -> ['sldkj', 'lsdkj', 'sldkj'] + """ + return [ + res.as_list() if isinstance(res, ParseResults) else res + for res in self._toklist + ] + + def as_dict(self) -> dict: + """ + Returns the named parse results as a nested dictionary. + + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parse_string('12/31/1999') + print(type(result), repr(result)) # -> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]}) + + result_dict = result.as_dict() + print(type(result_dict), repr(result_dict)) # -> {'day': '1999', 'year': '12', 'month': '31'} + + # even though a ParseResults supports dict-like access, sometime you just need to have a dict + import json + print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable + print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"} + """ + + def to_item(obj): + if isinstance(obj, ParseResults): + return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj] + else: + return obj + + return dict((k, to_item(v)) for k, v in self.items()) + + def copy(self) -> "ParseResults": + """ + Returns a new copy of a :class:`ParseResults` object. + """ + ret = ParseResults(self._toklist) + ret._tokdict = self._tokdict.copy() + ret._parent = self._parent + ret._all_names |= self._all_names + ret._name = self._name + return ret + + def get_name(self): + r""" + Returns the results name for this token expression. Useful when several + different expressions might match at a particular location. + + Example:: + + integer = Word(nums) + ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d") + house_number_expr = Suppress('#') + Word(nums, alphanums) + user_data = (Group(house_number_expr)("house_number") + | Group(ssn_expr)("ssn") + | Group(integer)("age")) + user_info = user_data[1, ...] + + result = user_info.parse_string("22 111-22-3333 #221B") + for item in result: + print(item.get_name(), ':', item[0]) + + prints:: + + age : 22 + ssn : 111-22-3333 + house_number : 221B + """ + if self._name: + return self._name + elif self._parent: + par = self._parent() + + def find_in_parent(sub): + return next( + ( + k + for k, vlist in par._tokdict.items() + for v, loc in vlist + if sub is v + ), + None, + ) + + return find_in_parent(self) if par else None + elif ( + len(self) == 1 + and len(self._tokdict) == 1 + and next(iter(self._tokdict.values()))[0][1] in (0, -1) + ): + return next(iter(self._tokdict.keys())) + else: + return None + + def dump(self, indent="", full=True, include_list=True, _depth=0) -> str: + """ + Diagnostic method for listing out the contents of + a :class:`ParseResults`. Accepts an optional ``indent`` argument so + that this string can be embedded in a nested display of other data. 
+ + Example:: + + integer = Word(nums) + date_str = integer("year") + '/' + integer("month") + '/' + integer("day") + + result = date_str.parse_string('1999/12/31') + print(result.dump()) + + prints:: + + ['1999', '/', '12', '/', '31'] + - day: '31' + - month: '12' + - year: '1999' + """ + out = [] + NL = "\n" + out.append(indent + str(self.as_list()) if include_list else "") + + if full: + if self.haskeys(): + items = sorted((str(k), v) for k, v in self.items()) + for k, v in items: + if out: + out.append(NL) + out.append("{}{}- {}: ".format(indent, (" " * _depth), k)) + if isinstance(v, ParseResults): + if v: + out.append( + v.dump( + indent=indent, + full=full, + include_list=include_list, + _depth=_depth + 1, + ) + ) + else: + out.append(str(v)) + else: + out.append(repr(v)) + if any(isinstance(vv, ParseResults) for vv in self): + v = self + for i, vv in enumerate(v): + if isinstance(vv, ParseResults): + out.append( + "\n{}{}[{}]:\n{}{}{}".format( + indent, + (" " * (_depth)), + i, + indent, + (" " * (_depth + 1)), + vv.dump( + indent=indent, + full=full, + include_list=include_list, + _depth=_depth + 1, + ), + ) + ) + else: + out.append( + "\n%s%s[%d]:\n%s%s%s" + % ( + indent, + (" " * (_depth)), + i, + indent, + (" " * (_depth + 1)), + str(vv), + ) + ) + + return "".join(out) + + def pprint(self, *args, **kwargs): + """ + Pretty-printer for parsed results as a list, using the + `pprint `_ module. + Accepts additional positional or keyword args as defined for + `pprint.pprint `_ . + + Example:: + + ident = Word(alphas, alphanums) + num = Word(nums) + func = Forward() + term = ident | num | Group('(' + func + ')') + func <<= ident + Group(Optional(delimited_list(term))) + result = func.parse_string("fna a,b,(fnb c,d,200),100") + result.pprint(width=40) + + prints:: + + ['fna', + ['a', + 'b', + ['(', 'fnb', ['c', 'd', '200'], ')'], + '100']] + """ + pprint.pprint(self.as_list(), *args, **kwargs) + + # add support for pickle protocol + def __getstate__(self): + return ( + self._toklist, + ( + self._tokdict.copy(), + self._parent is not None and self._parent() or None, + self._all_names, + self._name, + ), + ) + + def __setstate__(self, state): + self._toklist, (self._tokdict, par, inAccumNames, self._name) = state + self._all_names = set(inAccumNames) + if par is not None: + self._parent = wkref(par) + else: + self._parent = None + + def __getnewargs__(self): + return self._toklist, self._name + + def __dir__(self): + return dir(type(self)) + list(self.keys()) + + @classmethod + def from_dict(cls, other, name=None) -> "ParseResults": + """ + Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the + name-value relations as results names. If an optional ``name`` argument is + given, a nested ``ParseResults`` will be returned. 
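[Editorial note: a small hedged sketch of ``from_dict`` together with the pickle hooks defined above; illustrative only, assuming an installed upstream pyparsing 3.x::

    import pickle
    import pyparsing as pp

    pr = pp.ParseResults.from_dict({"name": "Ada", "langs": ["python", "lisp"]})
    print(pr["name"])                 # -> 'Ada'
    print(pr["langs"].as_list())      # -> ['python', 'lisp']

    # __getstate__/__setstate__/__getnewargs__ make ParseResults picklable
    clone = pickle.loads(pickle.dumps(pr))
    print(clone.as_dict() == pr.as_dict())   # -> True
]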
+ """ + + def is_iterable(obj): + try: + iter(obj) + except Exception: + return False + else: + return not isinstance(obj, str_type) + + ret = cls([]) + for k, v in other.items(): + if isinstance(v, Mapping): + ret += cls.from_dict(v, name=k) + else: + ret += cls([v], name=k, asList=is_iterable(v)) + if name is not None: + ret = cls([ret], name=name) + return ret + + asList = as_list + asDict = as_dict + getName = get_name + + +MutableMapping.register(ParseResults) +MutableSequence.register(ParseResults) diff --git a/src/poetry/core/_vendor/pyparsing/testing.py b/src/poetry/core/_vendor/pyparsing/testing.py new file mode 100644 index 0000000..84a0ef1 --- /dev/null +++ b/src/poetry/core/_vendor/pyparsing/testing.py @@ -0,0 +1,331 @@ +# testing.py + +from contextlib import contextmanager +import typing + +from .core import ( + ParserElement, + ParseException, + Keyword, + __diag__, + __compat__, +) + + +class pyparsing_test: + """ + namespace class for classes useful in writing unit tests + """ + + class reset_pyparsing_context: + """ + Context manager to be used when writing unit tests that modify pyparsing config values: + - packrat parsing + - bounded recursion parsing + - default whitespace characters. + - default keyword characters + - literal string auto-conversion class + - __diag__ settings + + Example:: + + with reset_pyparsing_context(): + # test that literals used to construct a grammar are automatically suppressed + ParserElement.inlineLiteralsUsing(Suppress) + + term = Word(alphas) | Word(nums) + group = Group('(' + term[...] + ')') + + # assert that the '()' characters are not included in the parsed tokens + self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def']) + + # after exiting context manager, literals are converted to Literal expressions again + """ + + def __init__(self): + self._save_context = {} + + def save(self): + self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS + self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS + + self._save_context[ + "literal_string_class" + ] = ParserElement._literalStringClass + + self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace + + self._save_context["packrat_enabled"] = ParserElement._packratEnabled + if ParserElement._packratEnabled: + self._save_context[ + "packrat_cache_size" + ] = ParserElement.packrat_cache.size + else: + self._save_context["packrat_cache_size"] = None + self._save_context["packrat_parse"] = ParserElement._parse + self._save_context[ + "recursion_enabled" + ] = ParserElement._left_recursion_enabled + + self._save_context["__diag__"] = { + name: getattr(__diag__, name) for name in __diag__._all_names + } + + self._save_context["__compat__"] = { + "collect_all_And_tokens": __compat__.collect_all_And_tokens + } + + return self + + def restore(self): + # reset pyparsing global state + if ( + ParserElement.DEFAULT_WHITE_CHARS + != self._save_context["default_whitespace"] + ): + ParserElement.set_default_whitespace_chars( + self._save_context["default_whitespace"] + ) + + ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"] + + Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"] + ParserElement.inlineLiteralsUsing( + self._save_context["literal_string_class"] + ) + + for name, value in self._save_context["__diag__"].items(): + (__diag__.enable if value else __diag__.disable)(name) + + ParserElement._packratEnabled = False + if self._save_context["packrat_enabled"]: + 
ParserElement.enable_packrat(self._save_context["packrat_cache_size"]) + else: + ParserElement._parse = self._save_context["packrat_parse"] + ParserElement._left_recursion_enabled = self._save_context[ + "recursion_enabled" + ] + + __compat__.collect_all_And_tokens = self._save_context["__compat__"] + + return self + + def copy(self): + ret = type(self)() + ret._save_context.update(self._save_context) + return ret + + def __enter__(self): + return self.save() + + def __exit__(self, *args): + self.restore() + + class TestParseResultsAsserts: + """ + A mixin class to add parse results assertion methods to normal unittest.TestCase classes. + """ + + def assertParseResultsEquals( + self, result, expected_list=None, expected_dict=None, msg=None + ): + """ + Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``, + and compare any defined results names with an optional ``expected_dict``. + """ + if expected_list is not None: + self.assertEqual(expected_list, result.as_list(), msg=msg) + if expected_dict is not None: + self.assertEqual(expected_dict, result.as_dict(), msg=msg) + + def assertParseAndCheckList( + self, expr, test_string, expected_list, msg=None, verbose=True + ): + """ + Convenience wrapper assert to test a parser element and input string, and assert that + the resulting ``ParseResults.asList()`` is equal to the ``expected_list``. + """ + result = expr.parse_string(test_string, parse_all=True) + if verbose: + print(result.dump()) + else: + print(result.as_list()) + self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg) + + def assertParseAndCheckDict( + self, expr, test_string, expected_dict, msg=None, verbose=True + ): + """ + Convenience wrapper assert to test a parser element and input string, and assert that + the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``. + """ + result = expr.parse_string(test_string, parseAll=True) + if verbose: + print(result.dump()) + else: + print(result.as_list()) + self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg) + + def assertRunTestResults( + self, run_tests_report, expected_parse_results=None, msg=None + ): + """ + Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of + list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped + with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``. + Finally, asserts that the overall ``runTests()`` success value is ``True``. 
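[Editorial note: a hedged sketch of how the ``TestParseResultsAsserts`` mixin above is intended to be used in a test suite; illustrative only, importing the upstream pyparsing package rather than the vendored copy::

    import unittest

    import pyparsing as pp
    from pyparsing.testing import pyparsing_test as ppt

    class DateParseTests(ppt.TestParseResultsAsserts, unittest.TestCase):
        def test_date(self):
            integer = pp.Word(pp.nums)
            date = integer("year") + "/" + integer("month") + "/" + integer("day")
            # asserts both successful parsing and the expected named results
            self.assertParseAndCheckDict(
                date,
                "1999/12/31",
                {"year": "1999", "month": "12", "day": "31"},
            )

    if __name__ == "__main__":
        unittest.main()
]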
+ + :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests + :param expected_parse_results (optional): [tuple(str, list, dict, Exception)] + """ + run_test_success, run_test_results = run_tests_report + + if expected_parse_results is not None: + merged = [ + (*rpt, expected) + for rpt, expected in zip(run_test_results, expected_parse_results) + ] + for test_string, result, expected in merged: + # expected should be a tuple containing a list and/or a dict or an exception, + # and optional failure message string + # an empty tuple will skip any result validation + fail_msg = next( + (exp for exp in expected if isinstance(exp, str)), None + ) + expected_exception = next( + ( + exp + for exp in expected + if isinstance(exp, type) and issubclass(exp, Exception) + ), + None, + ) + if expected_exception is not None: + with self.assertRaises( + expected_exception=expected_exception, msg=fail_msg or msg + ): + if isinstance(result, Exception): + raise result + else: + expected_list = next( + (exp for exp in expected if isinstance(exp, list)), None + ) + expected_dict = next( + (exp for exp in expected if isinstance(exp, dict)), None + ) + if (expected_list, expected_dict) != (None, None): + self.assertParseResultsEquals( + result, + expected_list=expected_list, + expected_dict=expected_dict, + msg=fail_msg or msg, + ) + else: + # warning here maybe? + print("no validation for {!r}".format(test_string)) + + # do this last, in case some specific test results can be reported instead + self.assertTrue( + run_test_success, msg=msg if msg is not None else "failed runTests" + ) + + @contextmanager + def assertRaisesParseException(self, exc_type=ParseException, msg=None): + with self.assertRaises(exc_type, msg=msg): + yield + + @staticmethod + def with_line_numbers( + s: str, + start_line: typing.Optional[int] = None, + end_line: typing.Optional[int] = None, + expand_tabs: bool = True, + eol_mark: str = "|", + mark_spaces: typing.Optional[str] = None, + mark_control: typing.Optional[str] = None, + ) -> str: + """ + Helpful method for debugging a parser - prints a string with line and column numbers. + (Line and column numbers are 1-based.) 
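[Editorial note: an illustrative call of ``with_line_numbers``, assuming the same upstream package; it prints the text with a column-number header, 1-based line numbers, and ``|`` marking each end of line::

    from pyparsing.testing import pyparsing_test as ppt

    sample = "alpha beta\n\tgamma\n"
    print(ppt.with_line_numbers(sample, eol_mark="|"))
]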
+ + :param s: tuple(bool, str - string to be printed with line and column numbers + :param start_line: int - (optional) starting line number in s to print (default=1) + :param end_line: int - (optional) ending line number in s to print (default=len(s)) + :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default + :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|") + :param mark_spaces: str - (optional) special character to display in place of spaces + :param mark_control: str - (optional) convert non-printing control characters to a placeholding + character; valid values: + - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊" + - any single character string - replace control characters with given string + - None (default) - string is displayed as-is + + :return: str - input string with leading line numbers and column number headers + """ + if expand_tabs: + s = s.expandtabs() + if mark_control is not None: + if mark_control == "unicode": + tbl = str.maketrans( + {c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))} + | {127: 0x2421} + ) + eol_mark = "" + else: + tbl = str.maketrans( + {c: mark_control for c in list(range(0, 32)) + [127]} + ) + s = s.translate(tbl) + if mark_spaces is not None and mark_spaces != " ": + if mark_spaces == "unicode": + tbl = str.maketrans({9: 0x2409, 32: 0x2423}) + s = s.translate(tbl) + else: + s = s.replace(" ", mark_spaces) + if start_line is None: + start_line = 1 + if end_line is None: + end_line = len(s) + end_line = min(end_line, len(s)) + start_line = min(max(1, start_line), end_line) + + if mark_control != "unicode": + s_lines = s.splitlines()[start_line - 1 : end_line] + else: + s_lines = [line + "␊" for line in s.split("␊")[start_line - 1 : end_line]] + if not s_lines: + return "" + + lineno_width = len(str(end_line)) + max_line_len = max(len(line) for line in s_lines) + lead = " " * (lineno_width + 1) + if max_line_len >= 99: + header0 = ( + lead + + "".join( + "{}{}".format(" " * 99, (i + 1) % 100) + for i in range(max(max_line_len // 100, 1)) + ) + + "\n" + ) + else: + header0 = "" + header1 = ( + header0 + + lead + + "".join( + " {}".format((i + 1) % 10) + for i in range(-(-max_line_len // 10)) + ) + + "\n" + ) + header2 = lead + "1234567890" * (-(-max_line_len // 10)) + "\n" + return ( + header1 + + header2 + + "\n".join( + "{:{}d}:{}{}".format(i, lineno_width, line, eol_mark) + for i, line in enumerate(s_lines, start=start_line) + ) + + "\n" + ) diff --git a/src/poetry/core/_vendor/pyparsing/unicode.py b/src/poetry/core/_vendor/pyparsing/unicode.py new file mode 100644 index 0000000..0652620 --- /dev/null +++ b/src/poetry/core/_vendor/pyparsing/unicode.py @@ -0,0 +1,352 @@ +# unicode.py + +import sys +from itertools import filterfalse +from typing import List, Tuple, Union + + +class _lazyclassproperty: + def __init__(self, fn): + self.fn = fn + self.__doc__ = fn.__doc__ + self.__name__ = fn.__name__ + + def __get__(self, obj, cls): + if cls is None: + cls = type(obj) + if not hasattr(cls, "_intern") or any( + cls._intern is getattr(superclass, "_intern", []) + for superclass in cls.__mro__[1:] + ): + cls._intern = {} + attrname = self.fn.__name__ + if attrname not in cls._intern: + cls._intern[attrname] = self.fn(cls) + return cls._intern[attrname] + + +UnicodeRangeList = List[Union[Tuple[int, int], Tuple[int]]] + + +class unicode_set: + """ + A set of Unicode characters, for language-specific strings for + 
``alphas``, ``nums``, ``alphanums``, and ``printables``. + A unicode_set is defined by a list of ranges in the Unicode character + set, in a class attribute ``_ranges``. Ranges can be specified using + 2-tuples or a 1-tuple, such as:: + + _ranges = [ + (0x0020, 0x007e), + (0x00a0, 0x00ff), + (0x0100,), + ] + + Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x). + + A unicode set can also be defined using multiple inheritance of other unicode sets:: + + class CJK(Chinese, Japanese, Korean): + pass + """ + + _ranges: UnicodeRangeList = [] + + @_lazyclassproperty + def _chars_for_ranges(cls): + ret = [] + for cc in cls.__mro__: + if cc is unicode_set: + break + for rr in getattr(cc, "_ranges", ()): + ret.extend(range(rr[0], rr[-1] + 1)) + return [chr(c) for c in sorted(set(ret))] + + @_lazyclassproperty + def printables(cls): + "all non-whitespace characters in this range" + return "".join(filterfalse(str.isspace, cls._chars_for_ranges)) + + @_lazyclassproperty + def alphas(cls): + "all alphabetic characters in this range" + return "".join(filter(str.isalpha, cls._chars_for_ranges)) + + @_lazyclassproperty + def nums(cls): + "all numeric digit characters in this range" + return "".join(filter(str.isdigit, cls._chars_for_ranges)) + + @_lazyclassproperty + def alphanums(cls): + "all alphanumeric characters in this range" + return cls.alphas + cls.nums + + @_lazyclassproperty + def identchars(cls): + "all characters in this range that are valid identifier characters, plus underscore '_'" + return "".join( + sorted( + set( + "".join(filter(str.isidentifier, cls._chars_for_ranges)) + + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº" + + "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ" + + "_" + ) + ) + ) + + @_lazyclassproperty + def identbodychars(cls): + """ + all characters in this range that are valid identifier body characters, + plus the digits 0-9 + """ + return "".join( + sorted( + set( + cls.identchars + + "0123456789" + + "".join( + [c for c in cls._chars_for_ranges if ("_" + c).isidentifier()] + ) + ) + ) + ) + + +class pyparsing_unicode(unicode_set): + """ + A namespace class for defining common language unicode_sets. 
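[Editorial note: a quick sketch of how these language sets are consumed by grammars; illustrative only, assuming an installed upstream pyparsing 3.x::

    import pyparsing as pp

    ppu = pp.pyparsing_unicode

    greek_word = pp.Word(ppu.Greek.alphas)
    print(greek_word.parse_string("αβγ δεζ").as_list())   # -> ['αβγ']

    # identchars/identbodychars can seed identifier grammars for other scripts
    cyrillic_ident = pp.Word(ppu.Cyrillic.identchars, ppu.Cyrillic.identbodychars)
    print(cyrillic_ident.parse_string("переменная1").as_list())   # -> ['переменная1']
]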
+ """ + + # fmt: off + + # define ranges in language character sets + _ranges: UnicodeRangeList = [ + (0x0020, sys.maxunicode), + ] + + class BasicMultilingualPlane(unicode_set): + "Unicode set for the Basic Multilingual Plane" + _ranges: UnicodeRangeList = [ + (0x0020, 0xFFFF), + ] + + class Latin1(unicode_set): + "Unicode set for Latin-1 Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0020, 0x007E), + (0x00A0, 0x00FF), + ] + + class LatinA(unicode_set): + "Unicode set for Latin-A Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0100, 0x017F), + ] + + class LatinB(unicode_set): + "Unicode set for Latin-B Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0180, 0x024F), + ] + + class Greek(unicode_set): + "Unicode set for Greek Unicode Character Ranges" + _ranges: UnicodeRangeList = [ + (0x0342, 0x0345), + (0x0370, 0x0377), + (0x037A, 0x037F), + (0x0384, 0x038A), + (0x038C,), + (0x038E, 0x03A1), + (0x03A3, 0x03E1), + (0x03F0, 0x03FF), + (0x1D26, 0x1D2A), + (0x1D5E,), + (0x1D60,), + (0x1D66, 0x1D6A), + (0x1F00, 0x1F15), + (0x1F18, 0x1F1D), + (0x1F20, 0x1F45), + (0x1F48, 0x1F4D), + (0x1F50, 0x1F57), + (0x1F59,), + (0x1F5B,), + (0x1F5D,), + (0x1F5F, 0x1F7D), + (0x1F80, 0x1FB4), + (0x1FB6, 0x1FC4), + (0x1FC6, 0x1FD3), + (0x1FD6, 0x1FDB), + (0x1FDD, 0x1FEF), + (0x1FF2, 0x1FF4), + (0x1FF6, 0x1FFE), + (0x2129,), + (0x2719, 0x271A), + (0xAB65,), + (0x10140, 0x1018D), + (0x101A0,), + (0x1D200, 0x1D245), + (0x1F7A1, 0x1F7A7), + ] + + class Cyrillic(unicode_set): + "Unicode set for Cyrillic Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0400, 0x052F), + (0x1C80, 0x1C88), + (0x1D2B,), + (0x1D78,), + (0x2DE0, 0x2DFF), + (0xA640, 0xA672), + (0xA674, 0xA69F), + (0xFE2E, 0xFE2F), + ] + + class Chinese(unicode_set): + "Unicode set for Chinese Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x2E80, 0x2E99), + (0x2E9B, 0x2EF3), + (0x31C0, 0x31E3), + (0x3400, 0x4DB5), + (0x4E00, 0x9FEF), + (0xA700, 0xA707), + (0xF900, 0xFA6D), + (0xFA70, 0xFAD9), + (0x16FE2, 0x16FE3), + (0x1F210, 0x1F212), + (0x1F214, 0x1F23B), + (0x1F240, 0x1F248), + (0x20000, 0x2A6D6), + (0x2A700, 0x2B734), + (0x2B740, 0x2B81D), + (0x2B820, 0x2CEA1), + (0x2CEB0, 0x2EBE0), + (0x2F800, 0x2FA1D), + ] + + class Japanese(unicode_set): + "Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges" + _ranges: UnicodeRangeList = [] + + class Kanji(unicode_set): + "Unicode set for Kanji Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x4E00, 0x9FBF), + (0x3000, 0x303F), + ] + + class Hiragana(unicode_set): + "Unicode set for Hiragana Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x3041, 0x3096), + (0x3099, 0x30A0), + (0x30FC,), + (0xFF70,), + (0x1B001,), + (0x1B150, 0x1B152), + (0x1F200,), + ] + + class Katakana(unicode_set): + "Unicode set for Katakana Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x3099, 0x309C), + (0x30A0, 0x30FF), + (0x31F0, 0x31FF), + (0x32D0, 0x32FE), + (0xFF65, 0xFF9F), + (0x1B000,), + (0x1B164, 0x1B167), + (0x1F201, 0x1F202), + (0x1F213,), + ] + + class Hangul(unicode_set): + "Unicode set for Hangul (Korean) Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x1100, 0x11FF), + (0x302E, 0x302F), + (0x3131, 0x318E), + (0x3200, 0x321C), + (0x3260, 0x327B), + (0x327E,), + (0xA960, 0xA97C), + (0xAC00, 0xD7A3), + (0xD7B0, 0xD7C6), + (0xD7CB, 0xD7FB), + (0xFFA0, 0xFFBE), + (0xFFC2, 0xFFC7), + (0xFFCA, 0xFFCF), + (0xFFD2, 0xFFD7), + (0xFFDA, 0xFFDC), + ] + + Korean = Hangul + + class 
CJK(Chinese, Japanese, Hangul): + "Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range" + + class Thai(unicode_set): + "Unicode set for Thai Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0E01, 0x0E3A), + (0x0E3F, 0x0E5B) + ] + + class Arabic(unicode_set): + "Unicode set for Arabic Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0600, 0x061B), + (0x061E, 0x06FF), + (0x0700, 0x077F), + ] + + class Hebrew(unicode_set): + "Unicode set for Hebrew Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0591, 0x05C7), + (0x05D0, 0x05EA), + (0x05EF, 0x05F4), + (0xFB1D, 0xFB36), + (0xFB38, 0xFB3C), + (0xFB3E,), + (0xFB40, 0xFB41), + (0xFB43, 0xFB44), + (0xFB46, 0xFB4F), + ] + + class Devanagari(unicode_set): + "Unicode set for Devanagari Unicode Character Range" + _ranges: UnicodeRangeList = [ + (0x0900, 0x097F), + (0xA8E0, 0xA8FF) + ] + + # fmt: on + + +pyparsing_unicode.Japanese._ranges = ( + pyparsing_unicode.Japanese.Kanji._ranges + + pyparsing_unicode.Japanese.Hiragana._ranges + + pyparsing_unicode.Japanese.Katakana._ranges +) + +pyparsing_unicode.BMP = pyparsing_unicode.BasicMultilingualPlane + +# add language identifiers using language Unicode +pyparsing_unicode.العربية = pyparsing_unicode.Arabic +pyparsing_unicode.中文 = pyparsing_unicode.Chinese +pyparsing_unicode.кириллица = pyparsing_unicode.Cyrillic +pyparsing_unicode.Ελληνικά = pyparsing_unicode.Greek +pyparsing_unicode.עִברִית = pyparsing_unicode.Hebrew +pyparsing_unicode.日本語 = pyparsing_unicode.Japanese +pyparsing_unicode.Japanese.漢字 = pyparsing_unicode.Japanese.Kanji +pyparsing_unicode.Japanese.カタカナ = pyparsing_unicode.Japanese.Katakana +pyparsing_unicode.Japanese.ひらがな = pyparsing_unicode.Japanese.Hiragana +pyparsing_unicode.한국어 = pyparsing_unicode.Korean +pyparsing_unicode.ไทย = pyparsing_unicode.Thai +pyparsing_unicode.देवनागरी = pyparsing_unicode.Devanagari diff --git a/src/poetry/core/_vendor/pyparsing/util.py b/src/poetry/core/_vendor/pyparsing/util.py new file mode 100644 index 0000000..34ce092 --- /dev/null +++ b/src/poetry/core/_vendor/pyparsing/util.py @@ -0,0 +1,235 @@ +# util.py +import warnings +import types +import collections +import itertools +from functools import lru_cache +from typing import List, Union, Iterable + +_bslash = chr(92) + + +class __config_flags: + """Internal class for defining compatibility and debugging flags""" + + _all_names: List[str] = [] + _fixed_names: List[str] = [] + _type_desc = "configuration" + + @classmethod + def _set(cls, dname, value): + if dname in cls._fixed_names: + warnings.warn( + "{}.{} {} is {} and cannot be overridden".format( + cls.__name__, + dname, + cls._type_desc, + str(getattr(cls, dname)).upper(), + ) + ) + return + if dname in cls._all_names: + setattr(cls, dname, value) + else: + raise ValueError("no such {} {!r}".format(cls._type_desc, dname)) + + enable = classmethod(lambda cls, name: cls._set(name, True)) + disable = classmethod(lambda cls, name: cls._set(name, False)) + + +@lru_cache(maxsize=128) +def col(loc: int, strg: str) -> int: + """ + Returns current column within a string, counting newlines as line separators. + The first column is number 1. + + Note: the default parsing behavior is to expand tabs in the input string + before starting the parsing process. 
See + :class:`ParserElement.parseString` for more + information on parsing strings containing ```` s, and suggested + methods to maintain a consistent view of the parsed string, the parse + location, and line and column positions within the parsed string. + """ + s = strg + return 1 if 0 < loc < len(s) and s[loc - 1] == "\n" else loc - s.rfind("\n", 0, loc) + + +@lru_cache(maxsize=128) +def lineno(loc: int, strg: str) -> int: + """Returns current line number within a string, counting newlines as line separators. + The first line is number 1. + + Note - the default parsing behavior is to expand tabs in the input string + before starting the parsing process. See :class:`ParserElement.parseString` + for more information on parsing strings containing ```` s, and + suggested methods to maintain a consistent view of the parsed string, the + parse location, and line and column positions within the parsed string. + """ + return strg.count("\n", 0, loc) + 1 + + +@lru_cache(maxsize=128) +def line(loc: int, strg: str) -> str: + """ + Returns the line of text containing loc within a string, counting newlines as line separators. + """ + last_cr = strg.rfind("\n", 0, loc) + next_cr = strg.find("\n", loc) + return strg[last_cr + 1 : next_cr] if next_cr >= 0 else strg[last_cr + 1 :] + + +class _UnboundedCache: + def __init__(self): + cache = {} + cache_get = cache.get + self.not_in_cache = not_in_cache = object() + + def get(_, key): + return cache_get(key, not_in_cache) + + def set_(_, key, value): + cache[key] = value + + def clear(_): + cache.clear() + + self.size = None + self.get = types.MethodType(get, self) + self.set = types.MethodType(set_, self) + self.clear = types.MethodType(clear, self) + + +class _FifoCache: + def __init__(self, size): + self.not_in_cache = not_in_cache = object() + cache = collections.OrderedDict() + cache_get = cache.get + + def get(_, key): + return cache_get(key, not_in_cache) + + def set_(_, key, value): + cache[key] = value + while len(cache) > size: + cache.popitem(last=False) + + def clear(_): + cache.clear() + + self.size = size + self.get = types.MethodType(get, self) + self.set = types.MethodType(set_, self) + self.clear = types.MethodType(clear, self) + + +class LRUMemo: + """ + A memoizing mapping that retains `capacity` deleted items + + The memo tracks retained items by their access order; once `capacity` items + are retained, the least recently used item is discarded. 
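[Editorial note: a tiny sketch of the three location helpers defined above; illustrative only, importing them from the upstream ``pyparsing.util`` module where they are defined::

    from pyparsing.util import col, line, lineno

    s = "alpha\nbeta gamma\n"
    loc = s.index("gamma")
    print(lineno(loc, s))   # -> 2
    print(col(loc, s))      # -> 6
    print(line(loc, s))     # -> 'beta gamma'
]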
+ """ + + def __init__(self, capacity): + self._capacity = capacity + self._active = {} + self._memory = collections.OrderedDict() + + def __getitem__(self, key): + try: + return self._active[key] + except KeyError: + self._memory.move_to_end(key) + return self._memory[key] + + def __setitem__(self, key, value): + self._memory.pop(key, None) + self._active[key] = value + + def __delitem__(self, key): + try: + value = self._active.pop(key) + except KeyError: + pass + else: + while len(self._memory) >= self._capacity: + self._memory.popitem(last=False) + self._memory[key] = value + + def clear(self): + self._active.clear() + self._memory.clear() + + +class UnboundedMemo(dict): + """ + A memoizing mapping that retains all deleted items + """ + + def __delitem__(self, key): + pass + + +def _escape_regex_range_chars(s: str) -> str: + # escape these chars: ^-[] + for c in r"\^-[]": + s = s.replace(c, _bslash + c) + s = s.replace("\n", r"\n") + s = s.replace("\t", r"\t") + return str(s) + + +def _collapse_string_to_ranges( + s: Union[str, Iterable[str]], re_escape: bool = True +) -> str: + def is_consecutive(c): + c_int = ord(c) + is_consecutive.prev, prev = c_int, is_consecutive.prev + if c_int - prev > 1: + is_consecutive.value = next(is_consecutive.counter) + return is_consecutive.value + + is_consecutive.prev = 0 + is_consecutive.counter = itertools.count() + is_consecutive.value = -1 + + def escape_re_range_char(c): + return "\\" + c if c in r"\^-][" else c + + def no_escape_re_range_char(c): + return c + + if not re_escape: + escape_re_range_char = no_escape_re_range_char + + ret = [] + s = "".join(sorted(set(s))) + if len(s) > 3: + for _, chars in itertools.groupby(s, key=is_consecutive): + first = last = next(chars) + last = collections.deque( + itertools.chain(iter([last]), chars), maxlen=1 + ).pop() + if first == last: + ret.append(escape_re_range_char(first)) + else: + sep = "" if ord(last) == ord(first) + 1 else "-" + ret.append( + "{}{}{}".format( + escape_re_range_char(first), sep, escape_re_range_char(last) + ) + ) + else: + ret = [escape_re_range_char(c) for c in s] + + return "".join(ret) + + +def _flatten(ll: list) -> list: + ret = [] + for i in ll: + if isinstance(i, list): + ret.extend(_flatten(i)) + else: + ret.append(i) + return ret diff --git a/src/poetry/core/_vendor/pyrsistent/LICENSE.mit b/src/poetry/core/_vendor/pyrsistent/LICENSE.mit new file mode 100644 index 0000000..6cbf251 --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/LICENSE.mit @@ -0,0 +1,22 @@ +Copyright (c) 2022 Tobias Gustafsson + +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. \ No newline at end of file diff --git a/src/poetry/core/_vendor/pyrsistent/__init__.py b/src/poetry/core/_vendor/pyrsistent/__init__.py new file mode 100644 index 0000000..be29965 --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/__init__.py @@ -0,0 +1,47 @@ +# -*- coding: utf-8 -*- + +from pyrsistent._pmap import pmap, m, PMap + +from pyrsistent._pvector import pvector, v, PVector + +from pyrsistent._pset import pset, s, PSet + +from pyrsistent._pbag import pbag, b, PBag + +from pyrsistent._plist import plist, l, PList + +from pyrsistent._pdeque import pdeque, dq, PDeque + +from pyrsistent._checked_types import ( + CheckedPMap, CheckedPVector, CheckedPSet, InvariantException, CheckedKeyTypeError, + CheckedValueTypeError, CheckedType, optional) + +from pyrsistent._field_common import ( + field, PTypeError, pset_field, pmap_field, pvector_field) + +from pyrsistent._precord import PRecord + +from pyrsistent._pclass import PClass, PClassMeta + +from pyrsistent._immutable import immutable + +from pyrsistent._helpers import freeze, thaw, mutant + +from pyrsistent._transformations import inc, discard, rex, ny + +from pyrsistent._toolz import get_in + + +__all__ = ('pmap', 'm', 'PMap', + 'pvector', 'v', 'PVector', + 'pset', 's', 'PSet', + 'pbag', 'b', 'PBag', + 'plist', 'l', 'PList', + 'pdeque', 'dq', 'PDeque', + 'CheckedPMap', 'CheckedPVector', 'CheckedPSet', 'InvariantException', 'CheckedKeyTypeError', 'CheckedValueTypeError', 'CheckedType', 'optional', + 'PRecord', 'field', 'pset_field', 'pmap_field', 'pvector_field', + 'PClass', 'PClassMeta', + 'immutable', + 'freeze', 'thaw', 'mutant', + 'get_in', + 'inc', 'discard', 'rex', 'ny') diff --git a/src/poetry/core/_vendor/pyrsistent/_checked_types.py b/src/poetry/core/_vendor/pyrsistent/_checked_types.py new file mode 100644 index 0000000..8ab8c2a --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/_checked_types.py @@ -0,0 +1,542 @@ +from enum import Enum + +from abc import abstractmethod, ABCMeta +from collections.abc import Iterable + +from pyrsistent._pmap import PMap, pmap +from pyrsistent._pset import PSet, pset +from pyrsistent._pvector import PythonPVector, python_pvector + + +class CheckedType(object): + """ + Marker class to enable creation and serialization of checked object graphs. + """ + __slots__ = () + + @classmethod + @abstractmethod + def create(cls, source_data, _factory_fields=None): + raise NotImplementedError() + + @abstractmethod + def serialize(self, format=None): + raise NotImplementedError() + + +def _restore_pickle(cls, data): + return cls.create(data, _factory_fields=set()) + + +class InvariantException(Exception): + """ + Exception raised from a :py:class:`CheckedType` when invariant tests fail or when a mandatory + field is missing. 
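[Editorial note: the names exported above are the public pyrsistent surface; a minimal usage sketch, illustrative only and assuming the upstream pyrsistent package is installed::

    from pyrsistent import freeze, pmap, thaw

    m1 = pmap({"a": 1})
    m2 = m1.set("b", 2)          # persistent update: returns a new map
    print(m1)                    # -> pmap({'a': 1})
    print(sorted(m2.items()))    # -> [('a', 1), ('b', 2)]

    # freeze/thaw convert nested mutable structures to and from persistent ones
    nested = freeze({"xs": [1, 2, 3]})
    print(nested["xs"])          # -> pvector([1, 2, 3])
    print(thaw(nested))          # -> {'xs': [1, 2, 3]}
]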
+ + Contains two fields of interest: + invariant_errors, a tuple of error data for the failing invariants + missing_fields, a tuple of strings specifying the missing names + """ + + def __init__(self, error_codes=(), missing_fields=(), *args, **kwargs): + self.invariant_errors = tuple(e() if callable(e) else e for e in error_codes) + self.missing_fields = missing_fields + super(InvariantException, self).__init__(*args, **kwargs) + + def __str__(self): + return super(InvariantException, self).__str__() + \ + ", invariant_errors=[{invariant_errors}], missing_fields=[{missing_fields}]".format( + invariant_errors=', '.join(str(e) for e in self.invariant_errors), + missing_fields=', '.join(self.missing_fields)) + + +_preserved_iterable_types = ( + Enum, +) +"""Some types are themselves iterable, but we want to use the type itself and +not its members for the type specification. This defines a set of such types +that we explicitly preserve. + +Note that strings are not such types because the string inputs we pass in are +values, not types. +""" + + +def maybe_parse_user_type(t): + """Try to coerce a user-supplied type directive into a list of types. + + This function should be used in all places where a user specifies a type, + for consistency. + + The policy for what defines valid user input should be clear from the implementation. + """ + is_type = isinstance(t, type) + is_preserved = isinstance(t, type) and issubclass(t, _preserved_iterable_types) + is_string = isinstance(t, str) + is_iterable = isinstance(t, Iterable) + + if is_preserved: + return [t] + elif is_string: + return [t] + elif is_type and not is_iterable: + return [t] + elif is_iterable: + # Recur to validate contained types as well. + ts = t + return tuple(e for t in ts for e in maybe_parse_user_type(t)) + else: + # If this raises because `t` cannot be formatted, so be it. + raise TypeError( + 'Type specifications must be types or strings. Input: {}'.format(t) + ) + + +def maybe_parse_many_user_types(ts): + # Just a different name to communicate that you're parsing multiple user + # inputs. `maybe_parse_user_type` handles the iterable case anyway. + return maybe_parse_user_type(ts) + + +def _store_types(dct, bases, destination_name, source_name): + maybe_types = maybe_parse_many_user_types([ + d[source_name] + for d in ([dct] + [b.__dict__ for b in bases]) if source_name in d + ]) + + dct[destination_name] = maybe_types + + +def _merge_invariant_results(result): + verdict = True + data = [] + for verd, dat in result: + if not verd: + verdict = False + data.append(dat) + + return verdict, tuple(data) + + +def wrap_invariant(invariant): + # Invariant functions may return the outcome of several tests + # In those cases the results have to be merged before being passed + # back to the client. + def f(*args, **kwargs): + result = invariant(*args, **kwargs) + if isinstance(result[0], bool): + return result + + return _merge_invariant_results(result) + + return f + + +def _all_dicts(bases, seen=None): + """ + Yield each class in ``bases`` and each of their base classes. 
+ """ + if seen is None: + seen = set() + for cls in bases: + if cls in seen: + continue + seen.add(cls) + yield cls.__dict__ + for b in _all_dicts(cls.__bases__, seen): + yield b + + +def store_invariants(dct, bases, destination_name, source_name): + # Invariants are inherited + invariants = [] + for ns in [dct] + list(_all_dicts(bases)): + try: + invariant = ns[source_name] + except KeyError: + continue + invariants.append(invariant) + + if not all(callable(invariant) for invariant in invariants): + raise TypeError('Invariants must be callable') + dct[destination_name] = tuple(wrap_invariant(inv) for inv in invariants) + + +class _CheckedTypeMeta(ABCMeta): + def __new__(mcs, name, bases, dct): + _store_types(dct, bases, '_checked_types', '__type__') + store_invariants(dct, bases, '_checked_invariants', '__invariant__') + + def default_serializer(self, _, value): + if isinstance(value, CheckedType): + return value.serialize() + return value + + dct.setdefault('__serializer__', default_serializer) + + dct['__slots__'] = () + + return super(_CheckedTypeMeta, mcs).__new__(mcs, name, bases, dct) + + +class CheckedTypeError(TypeError): + def __init__(self, source_class, expected_types, actual_type, actual_value, *args, **kwargs): + super(CheckedTypeError, self).__init__(*args, **kwargs) + self.source_class = source_class + self.expected_types = expected_types + self.actual_type = actual_type + self.actual_value = actual_value + + +class CheckedKeyTypeError(CheckedTypeError): + """ + Raised when trying to set a value using a key with a type that doesn't match the declared type. + + Attributes: + source_class -- The class of the collection + expected_types -- Allowed types + actual_type -- The non matching type + actual_value -- Value of the variable with the non matching type + """ + pass + + +class CheckedValueTypeError(CheckedTypeError): + """ + Raised when trying to set a value using a key with a type that doesn't match the declared type. 
+ + Attributes: + source_class -- The class of the collection + expected_types -- Allowed types + actual_type -- The non matching type + actual_value -- Value of the variable with the non matching type + """ + pass + + +def _get_class(type_name): + module_name, class_name = type_name.rsplit('.', 1) + module = __import__(module_name, fromlist=[class_name]) + return getattr(module, class_name) + + +def get_type(typ): + if isinstance(typ, type): + return typ + + return _get_class(typ) + + +def get_types(typs): + return [get_type(typ) for typ in typs] + + +def _check_types(it, expected_types, source_class, exception_type=CheckedValueTypeError): + if expected_types: + for e in it: + if not any(isinstance(e, get_type(t)) for t in expected_types): + actual_type = type(e) + msg = "Type {source_class} can only be used with {expected_types}, not {actual_type}".format( + source_class=source_class.__name__, + expected_types=tuple(get_type(et).__name__ for et in expected_types), + actual_type=actual_type.__name__) + raise exception_type(source_class, expected_types, actual_type, e, msg) + + +def _invariant_errors(elem, invariants): + return [data for valid, data in (invariant(elem) for invariant in invariants) if not valid] + + +def _invariant_errors_iterable(it, invariants): + return sum([_invariant_errors(elem, invariants) for elem in it], []) + + +def optional(*typs): + """ Convenience function to specify that a value may be of any of the types in type 'typs' or None """ + return tuple(typs) + (type(None),) + + +def _checked_type_create(cls, source_data, _factory_fields=None, ignore_extra=False): + if isinstance(source_data, cls): + return source_data + + # Recursively apply create methods of checked types if the types of the supplied data + # does not match any of the valid types. + types = get_types(cls._checked_types) + checked_type = next((t for t in types if issubclass(t, CheckedType)), None) + if checked_type: + return cls([checked_type.create(data, ignore_extra=ignore_extra) + if not any(isinstance(data, t) for t in types) else data + for data in source_data]) + + return cls(source_data) + +class CheckedPVector(PythonPVector, CheckedType, metaclass=_CheckedTypeMeta): + """ + A CheckedPVector is a PVector which allows specifying type and invariant checks. + + >>> class Positives(CheckedPVector): + ... __type__ = (int, float) + ... __invariant__ = lambda n: (n >= 0, 'Negative') + ... 
+ >>> Positives([1, 2, 3]) + Positives([1, 2, 3]) + """ + + __slots__ = () + + def __new__(cls, initial=()): + if type(initial) == PythonPVector: + return super(CheckedPVector, cls).__new__(cls, initial._count, initial._shift, initial._root, initial._tail) + + return CheckedPVector.Evolver(cls, python_pvector()).extend(initial).persistent() + + def set(self, key, value): + return self.evolver().set(key, value).persistent() + + def append(self, val): + return self.evolver().append(val).persistent() + + def extend(self, it): + return self.evolver().extend(it).persistent() + + create = classmethod(_checked_type_create) + + def serialize(self, format=None): + serializer = self.__serializer__ + return list(serializer(format, v) for v in self) + + def __reduce__(self): + # Pickling support + return _restore_pickle, (self.__class__, list(self),) + + class Evolver(PythonPVector.Evolver): + __slots__ = ('_destination_class', '_invariant_errors') + + def __init__(self, destination_class, vector): + super(CheckedPVector.Evolver, self).__init__(vector) + self._destination_class = destination_class + self._invariant_errors = [] + + def _check(self, it): + _check_types(it, self._destination_class._checked_types, self._destination_class) + error_data = _invariant_errors_iterable(it, self._destination_class._checked_invariants) + self._invariant_errors.extend(error_data) + + def __setitem__(self, key, value): + self._check([value]) + return super(CheckedPVector.Evolver, self).__setitem__(key, value) + + def append(self, elem): + self._check([elem]) + return super(CheckedPVector.Evolver, self).append(elem) + + def extend(self, it): + it = list(it) + self._check(it) + return super(CheckedPVector.Evolver, self).extend(it) + + def persistent(self): + if self._invariant_errors: + raise InvariantException(error_codes=self._invariant_errors) + + result = self._orig_pvector + if self.is_dirty() or (self._destination_class != type(self._orig_pvector)): + pv = super(CheckedPVector.Evolver, self).persistent().extend(self._extra_tail) + result = self._destination_class(pv) + self._reset(result) + + return result + + def __repr__(self): + return self.__class__.__name__ + "({0})".format(self.tolist()) + + __str__ = __repr__ + + def evolver(self): + return CheckedPVector.Evolver(self.__class__, self) + + +class CheckedPSet(PSet, CheckedType, metaclass=_CheckedTypeMeta): + """ + A CheckedPSet is a PSet which allows specifying type and invariant checks. + + >>> class Positives(CheckedPSet): + ... __type__ = (int, float) + ... __invariant__ = lambda n: (n >= 0, 'Negative') + ... 
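[Editorial note: building on the doctest above, a hedged sketch of how type and invariant failures surface from a ``CheckedPVector``; illustrative only, assuming the upstream pyrsistent package::

    from pyrsistent import CheckedPVector, CheckedValueTypeError, InvariantException

    class Positives(CheckedPVector):
        __type__ = (int, float)
        __invariant__ = lambda n: (n >= 0, "Negative")

    print(Positives([1, 2, 3]))          # -> Positives([1, 2, 3])

    # invariant failures are collected and raised when the vector is built
    try:
        Positives([1, -2])
    except InvariantException as e:
        print(e.invariant_errors)        # -> ('Negative',)

    # type mismatches are reported immediately as CheckedValueTypeError
    try:
        Positives([1, "x"])
    except CheckedValueTypeError as e:
        print(e.actual_type.__name__)    # -> 'str'
]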
+ >>> Positives([1, 2, 3]) + Positives([1, 2, 3]) + """ + + __slots__ = () + + def __new__(cls, initial=()): + if type(initial) is PMap: + return super(CheckedPSet, cls).__new__(cls, initial) + + evolver = CheckedPSet.Evolver(cls, pset()) + for e in initial: + evolver.add(e) + + return evolver.persistent() + + def __repr__(self): + return self.__class__.__name__ + super(CheckedPSet, self).__repr__()[4:] + + def __str__(self): + return self.__repr__() + + def serialize(self, format=None): + serializer = self.__serializer__ + return set(serializer(format, v) for v in self) + + create = classmethod(_checked_type_create) + + def __reduce__(self): + # Pickling support + return _restore_pickle, (self.__class__, list(self),) + + def evolver(self): + return CheckedPSet.Evolver(self.__class__, self) + + class Evolver(PSet._Evolver): + __slots__ = ('_destination_class', '_invariant_errors') + + def __init__(self, destination_class, original_set): + super(CheckedPSet.Evolver, self).__init__(original_set) + self._destination_class = destination_class + self._invariant_errors = [] + + def _check(self, it): + _check_types(it, self._destination_class._checked_types, self._destination_class) + error_data = _invariant_errors_iterable(it, self._destination_class._checked_invariants) + self._invariant_errors.extend(error_data) + + def add(self, element): + self._check([element]) + self._pmap_evolver[element] = True + return self + + def persistent(self): + if self._invariant_errors: + raise InvariantException(error_codes=self._invariant_errors) + + if self.is_dirty() or self._destination_class != type(self._original_pset): + return self._destination_class(self._pmap_evolver.persistent()) + + return self._original_pset + + +class _CheckedMapTypeMeta(type): + def __new__(mcs, name, bases, dct): + _store_types(dct, bases, '_checked_key_types', '__key_type__') + _store_types(dct, bases, '_checked_value_types', '__value_type__') + store_invariants(dct, bases, '_checked_invariants', '__invariant__') + + def default_serializer(self, _, key, value): + sk = key + if isinstance(key, CheckedType): + sk = key.serialize() + + sv = value + if isinstance(value, CheckedType): + sv = value.serialize() + + return sk, sv + + dct.setdefault('__serializer__', default_serializer) + + dct['__slots__'] = () + + return super(_CheckedMapTypeMeta, mcs).__new__(mcs, name, bases, dct) + +# Marker object +_UNDEFINED_CHECKED_PMAP_SIZE = object() + + +class CheckedPMap(PMap, CheckedType, metaclass=_CheckedMapTypeMeta): + """ + A CheckedPMap is a PMap which allows specifying type and invariant checks. + + >>> class IntToFloatMap(CheckedPMap): + ... __key_type__ = int + ... __value_type__ = float + ... __invariant__ = lambda k, v: (int(v) == k, 'Invalid mapping') + ... 
+ >>> IntToFloatMap({1: 1.5, 2: 2.25}) + IntToFloatMap({1: 1.5, 2: 2.25}) + """ + + __slots__ = () + + def __new__(cls, initial={}, size=_UNDEFINED_CHECKED_PMAP_SIZE): + if size is not _UNDEFINED_CHECKED_PMAP_SIZE: + return super(CheckedPMap, cls).__new__(cls, size, initial) + + evolver = CheckedPMap.Evolver(cls, pmap()) + for k, v in initial.items(): + evolver.set(k, v) + + return evolver.persistent() + + def evolver(self): + return CheckedPMap.Evolver(self.__class__, self) + + def __repr__(self): + return self.__class__.__name__ + "({0})".format(str(dict(self))) + + __str__ = __repr__ + + def serialize(self, format=None): + serializer = self.__serializer__ + return dict(serializer(format, k, v) for k, v in self.items()) + + @classmethod + def create(cls, source_data, _factory_fields=None): + if isinstance(source_data, cls): + return source_data + + # Recursively apply create methods of checked types if the types of the supplied data + # does not match any of the valid types. + key_types = get_types(cls._checked_key_types) + checked_key_type = next((t for t in key_types if issubclass(t, CheckedType)), None) + value_types = get_types(cls._checked_value_types) + checked_value_type = next((t for t in value_types if issubclass(t, CheckedType)), None) + + if checked_key_type or checked_value_type: + return cls(dict((checked_key_type.create(key) if checked_key_type and not any(isinstance(key, t) for t in key_types) else key, + checked_value_type.create(value) if checked_value_type and not any(isinstance(value, t) for t in value_types) else value) + for key, value in source_data.items())) + + return cls(source_data) + + def __reduce__(self): + # Pickling support + return _restore_pickle, (self.__class__, dict(self),) + + class Evolver(PMap._Evolver): + __slots__ = ('_destination_class', '_invariant_errors') + + def __init__(self, destination_class, original_map): + super(CheckedPMap.Evolver, self).__init__(original_map) + self._destination_class = destination_class + self._invariant_errors = [] + + def set(self, key, value): + _check_types([key], self._destination_class._checked_key_types, self._destination_class, CheckedKeyTypeError) + _check_types([value], self._destination_class._checked_value_types, self._destination_class) + self._invariant_errors.extend(data for valid, data in (invariant(key, value) + for invariant in self._destination_class._checked_invariants) + if not valid) + + return super(CheckedPMap.Evolver, self).set(key, value) + + def persistent(self): + if self._invariant_errors: + raise InvariantException(error_codes=self._invariant_errors) + + if self.is_dirty() or type(self._original_pmap) != self._destination_class: + return self._destination_class(self._buckets_evolver.persistent(), self._size) + + return self._original_pmap diff --git a/src/poetry/core/_vendor/pyrsistent/_field_common.py b/src/poetry/core/_vendor/pyrsistent/_field_common.py new file mode 100644 index 0000000..508dd2f --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/_field_common.py @@ -0,0 +1,332 @@ +from pyrsistent._checked_types import ( + CheckedPMap, + CheckedPSet, + CheckedPVector, + CheckedType, + InvariantException, + _restore_pickle, + get_type, + maybe_parse_user_type, + maybe_parse_many_user_types, +) +from pyrsistent._checked_types import optional as optional_type +from pyrsistent._checked_types import wrap_invariant +import inspect + + +def set_fields(dct, bases, name): + dct[name] = dict(sum([list(b.__dict__.get(name, {}).items()) for b in bases], [])) + + for k, v in list(dct.items()): 
+ if isinstance(v, _PField): + dct[name][k] = v + del dct[k] + + +def check_global_invariants(subject, invariants): + error_codes = tuple(error_code for is_ok, error_code in + (invariant(subject) for invariant in invariants) if not is_ok) + if error_codes: + raise InvariantException(error_codes, (), 'Global invariant failed') + + +def serialize(serializer, format, value): + if isinstance(value, CheckedType) and serializer is PFIELD_NO_SERIALIZER: + return value.serialize(format) + + return serializer(format, value) + + +def check_type(destination_cls, field, name, value): + if field.type and not any(isinstance(value, get_type(t)) for t in field.type): + actual_type = type(value) + message = "Invalid type for field {0}.{1}, was {2}".format(destination_cls.__name__, name, actual_type.__name__) + raise PTypeError(destination_cls, name, field.type, actual_type, message) + + +def is_type_cls(type_cls, field_type): + if type(field_type) is set: + return True + types = tuple(field_type) + if len(types) == 0: + return False + return issubclass(get_type(types[0]), type_cls) + + +def is_field_ignore_extra_complaint(type_cls, field, ignore_extra): + # ignore_extra param has default False value, for speed purpose no need to propagate False + if not ignore_extra: + return False + + if not is_type_cls(type_cls, field.type): + return False + + return 'ignore_extra' in inspect.signature(field.factory).parameters + + + +class _PField(object): + __slots__ = ('type', 'invariant', 'initial', 'mandatory', '_factory', 'serializer') + + def __init__(self, type, invariant, initial, mandatory, factory, serializer): + self.type = type + self.invariant = invariant + self.initial = initial + self.mandatory = mandatory + self._factory = factory + self.serializer = serializer + + @property + def factory(self): + # If no factory is specified and the type is another CheckedType use the factory method of that CheckedType + if self._factory is PFIELD_NO_FACTORY and len(self.type) == 1: + typ = get_type(tuple(self.type)[0]) + if issubclass(typ, CheckedType): + return typ.create + + return self._factory + +PFIELD_NO_TYPE = () +PFIELD_NO_INVARIANT = lambda _: (True, None) +PFIELD_NO_FACTORY = lambda x: x +PFIELD_NO_INITIAL = object() +PFIELD_NO_SERIALIZER = lambda _, value: value + + +def field(type=PFIELD_NO_TYPE, invariant=PFIELD_NO_INVARIANT, initial=PFIELD_NO_INITIAL, + mandatory=False, factory=PFIELD_NO_FACTORY, serializer=PFIELD_NO_SERIALIZER): + """ + Field specification factory for :py:class:`PRecord`. + + :param type: a type or iterable with types that are allowed for this field + :param invariant: a function specifying an invariant that must hold for the field + :param initial: value of field if not specified when instantiating the record + :param mandatory: boolean specifying if the field is mandatory or not + :param factory: function called when field is set. + :param serializer: function that returns a serialized version of the field + """ + + # NB: We have to check this predicate separately from the predicates in + # `maybe_parse_user_type` et al. because this one is related to supporting + # the argspec for `field`, while those are related to supporting the valid + # ways to specify types. + + # Multiple types must be passed in one of the following containers. Note + # that a type that is a subclass of one of these containers, like a + # `collections.namedtuple`, will work as expected, since we check + # `isinstance` and not `issubclass`. 
+ if isinstance(type, (list, set, tuple)): + types = set(maybe_parse_many_user_types(type)) + else: + types = set(maybe_parse_user_type(type)) + + invariant_function = wrap_invariant(invariant) if invariant != PFIELD_NO_INVARIANT and callable(invariant) else invariant + field = _PField(type=types, invariant=invariant_function, initial=initial, + mandatory=mandatory, factory=factory, serializer=serializer) + + _check_field_parameters(field) + + return field + + +def _check_field_parameters(field): + for t in field.type: + if not isinstance(t, type) and not isinstance(t, str): + raise TypeError('Type parameter expected, not {0}'.format(type(t))) + + if field.initial is not PFIELD_NO_INITIAL and \ + not callable(field.initial) and \ + field.type and not any(isinstance(field.initial, t) for t in field.type): + raise TypeError('Initial has invalid type {0}'.format(type(field.initial))) + + if not callable(field.invariant): + raise TypeError('Invariant must be callable') + + if not callable(field.factory): + raise TypeError('Factory must be callable') + + if not callable(field.serializer): + raise TypeError('Serializer must be callable') + + +class PTypeError(TypeError): + """ + Raised when trying to assign a value with a type that doesn't match the declared type. + + Attributes: + source_class -- The class of the record + field -- Field name + expected_types -- Types allowed for the field + actual_type -- The non matching type + """ + def __init__(self, source_class, field, expected_types, actual_type, *args, **kwargs): + super(PTypeError, self).__init__(*args, **kwargs) + self.source_class = source_class + self.field = field + self.expected_types = expected_types + self.actual_type = actual_type + + +SEQ_FIELD_TYPE_SUFFIXES = { + CheckedPVector: "PVector", + CheckedPSet: "PSet", +} + +# Global dictionary to hold auto-generated field types: used for unpickling +_seq_field_types = {} + +def _restore_seq_field_pickle(checked_class, item_type, data): + """Unpickling function for auto-generated PVec/PSet field types.""" + type_ = _seq_field_types[checked_class, item_type] + return _restore_pickle(type_, data) + +def _types_to_names(types): + """Convert a tuple of types to a human-readable string.""" + return "".join(get_type(typ).__name__.capitalize() for typ in types) + +def _make_seq_field_type(checked_class, item_type, item_invariant): + """Create a subclass of the given checked class with the given item type.""" + type_ = _seq_field_types.get((checked_class, item_type)) + if type_ is not None: + return type_ + + class TheType(checked_class): + __type__ = item_type + __invariant__ = item_invariant + + def __reduce__(self): + return (_restore_seq_field_pickle, + (checked_class, item_type, list(self))) + + suffix = SEQ_FIELD_TYPE_SUFFIXES[checked_class] + TheType.__name__ = _types_to_names(TheType._checked_types) + suffix + _seq_field_types[checked_class, item_type] = TheType + return TheType + +def _sequence_field(checked_class, item_type, optional, initial, + invariant=PFIELD_NO_INVARIANT, + item_invariant=PFIELD_NO_INVARIANT): + """ + Create checked field for either ``PSet`` or ``PVector``. + + :param checked_class: ``CheckedPSet`` or ``CheckedPVector``. + :param item_type: The required type for the items in the set. + :param optional: If true, ``None`` can be used as a value for + this field. + :param initial: Initial value to pass to factory. + + :return: A ``field`` containing a checked class. 
+ """ + TheType = _make_seq_field_type(checked_class, item_type, item_invariant) + + if optional: + def factory(argument, _factory_fields=None, ignore_extra=False): + if argument is None: + return None + else: + return TheType.create(argument, _factory_fields=_factory_fields, ignore_extra=ignore_extra) + else: + factory = TheType.create + + return field(type=optional_type(TheType) if optional else TheType, + factory=factory, mandatory=True, + invariant=invariant, + initial=factory(initial)) + + +def pset_field(item_type, optional=False, initial=(), + invariant=PFIELD_NO_INVARIANT, + item_invariant=PFIELD_NO_INVARIANT): + """ + Create checked ``PSet`` field. + + :param item_type: The required type for the items in the set. + :param optional: If true, ``None`` can be used as a value for + this field. + :param initial: Initial value to pass to factory if no value is given + for the field. + + :return: A ``field`` containing a ``CheckedPSet`` of the given type. + """ + return _sequence_field(CheckedPSet, item_type, optional, initial, + invariant=invariant, + item_invariant=item_invariant) + + +def pvector_field(item_type, optional=False, initial=(), + invariant=PFIELD_NO_INVARIANT, + item_invariant=PFIELD_NO_INVARIANT): + """ + Create checked ``PVector`` field. + + :param item_type: The required type for the items in the vector. + :param optional: If true, ``None`` can be used as a value for + this field. + :param initial: Initial value to pass to factory if no value is given + for the field. + + :return: A ``field`` containing a ``CheckedPVector`` of the given type. + """ + return _sequence_field(CheckedPVector, item_type, optional, initial, + invariant=invariant, + item_invariant=item_invariant) + + +_valid = lambda item: (True, "") + + +# Global dictionary to hold auto-generated field types: used for unpickling +_pmap_field_types = {} + +def _restore_pmap_field_pickle(key_type, value_type, data): + """Unpickling function for auto-generated PMap field types.""" + type_ = _pmap_field_types[key_type, value_type] + return _restore_pickle(type_, data) + +def _make_pmap_field_type(key_type, value_type): + """Create a subclass of CheckedPMap with the given key and value types.""" + type_ = _pmap_field_types.get((key_type, value_type)) + if type_ is not None: + return type_ + + class TheMap(CheckedPMap): + __key_type__ = key_type + __value_type__ = value_type + + def __reduce__(self): + return (_restore_pmap_field_pickle, + (self.__key_type__, self.__value_type__, dict(self))) + + TheMap.__name__ = "{0}To{1}PMap".format( + _types_to_names(TheMap._checked_key_types), + _types_to_names(TheMap._checked_value_types)) + _pmap_field_types[key_type, value_type] = TheMap + return TheMap + + +def pmap_field(key_type, value_type, optional=False, invariant=PFIELD_NO_INVARIANT): + """ + Create a checked ``PMap`` field. + + :param key: The required type for the keys of the map. + :param value: The required type for the values of the map. + :param optional: If true, ``None`` can be used as a value for + this field. + :param invariant: Pass-through to ``field``. + + :return: A ``field`` containing a ``CheckedPMap``. 
+ """ + TheMap = _make_pmap_field_type(key_type, value_type) + + if optional: + def factory(argument): + if argument is None: + return None + else: + return TheMap.create(argument) + else: + factory = TheMap.create + + return field(mandatory=True, initial=TheMap(), + type=optional_type(TheMap) if optional else TheMap, + factory=factory, invariant=invariant) diff --git a/src/poetry/core/_vendor/pyrsistent/_helpers.py b/src/poetry/core/_vendor/pyrsistent/_helpers.py new file mode 100644 index 0000000..1320e65 --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/_helpers.py @@ -0,0 +1,97 @@ +from functools import wraps +from pyrsistent._pmap import PMap, pmap +from pyrsistent._pset import PSet, pset +from pyrsistent._pvector import PVector, pvector + +def freeze(o, strict=True): + """ + Recursively convert simple Python containers into pyrsistent versions + of those containers. + + - list is converted to pvector, recursively + - dict is converted to pmap, recursively on values (but not keys) + - set is converted to pset, but not recursively + - tuple is converted to tuple, recursively. + + If strict == True (default): + + - freeze is called on elements of pvectors + - freeze is called on values of pmaps + + Sets and dict keys are not recursively frozen because they do not contain + mutable data by convention. The main exception to this rule is that + dict keys and set elements are often instances of mutable objects that + support hash-by-id, which this function can't convert anyway. + + >>> freeze(set([1, 2])) + pset([1, 2]) + >>> freeze([1, {'a': 3}]) + pvector([1, pmap({'a': 3})]) + >>> freeze((1, [])) + (1, pvector([])) + """ + typ = type(o) + if typ is dict or (strict and isinstance(o, PMap)): + return pmap({k: freeze(v, strict) for k, v in o.items()}) + if typ is list or (strict and isinstance(o, PVector)): + curried_freeze = lambda x: freeze(x, strict) + return pvector(map(curried_freeze, o)) + if typ is tuple: + curried_freeze = lambda x: freeze(x, strict) + return tuple(map(curried_freeze, o)) + if typ is set: + # impossible to have anything that needs freezing inside a set or pset + return pset(o) + return o + + +def thaw(o, strict=True): + """ + Recursively convert pyrsistent containers into simple Python containers. + + - pvector is converted to list, recursively + - pmap is converted to dict, recursively on values (but not keys) + - pset is converted to set, but not recursively + - tuple is converted to tuple, recursively. + + If strict == True (the default): + + - thaw is called on elements of lists + - thaw is called on values in dicts + + >>> from pyrsistent import s, m, v + >>> thaw(s(1, 2)) + {1, 2} + >>> thaw(v(1, m(a=3))) + [1, {'a': 3}] + >>> thaw((1, v())) + (1, []) + """ + typ = type(o) + if isinstance(o, PVector) or (strict and typ is list): + curried_thaw = lambda x: thaw(x, strict) + return list(map(curried_thaw, o)) + if isinstance(o, PMap) or (strict and typ is dict): + return {k: thaw(v, strict) for k, v in o.items()} + if typ is tuple: + curried_thaw = lambda x: thaw(x, strict) + return tuple(map(curried_thaw, o)) + if isinstance(o, PSet): + # impossible to thaw inside psets or sets + return set(o) + return o + + +def mutant(fn): + """ + Convenience decorator to isolate mutation to within the decorated function (with respect + to the input arguments). + + All arguments to the decorated function will be frozen so that they are guaranteed not to change. + The return value is also frozen. 
+ """ + @wraps(fn) + def inner_f(*args, **kwargs): + return freeze(fn(*[freeze(e) for e in args], **dict(freeze(item) for item in kwargs.items()))) + + return inner_f diff --git a/src/poetry/core/_vendor/pyrsistent/_immutable.py b/src/poetry/core/_vendor/pyrsistent/_immutable.py new file mode 100644 index 0000000..7c75945 --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/_immutable.py @@ -0,0 +1,103 @@ +import sys + + +def immutable(members='', name='Immutable', verbose=False): + """ + Produces a class that either can be used standalone or as a base class for persistent classes. + + This is a thin wrapper around a named tuple. + + Constructing a type and using it to instantiate objects: + + >>> Point = immutable('x, y', name='Point') + >>> p = Point(1, 2) + >>> p2 = p.set(x=3) + >>> p + Point(x=1, y=2) + >>> p2 + Point(x=3, y=2) + + Inheriting from a constructed type. In this case no type name needs to be supplied: + + >>> class PositivePoint(immutable('x, y')): + ... __slots__ = tuple() + ... def __new__(cls, x, y): + ... if x > 0 and y > 0: + ... return super(PositivePoint, cls).__new__(cls, x, y) + ... raise Exception('Coordinates must be positive!') + ... + >>> p = PositivePoint(1, 2) + >>> p.set(x=3) + PositivePoint(x=3, y=2) + >>> p.set(y=-3) + Traceback (most recent call last): + Exception: Coordinates must be positive! + + The persistent class also supports the notion of frozen members. The value of a frozen member + cannot be updated. For example it could be used to implement an ID that should remain the same + over time. A frozen member is denoted by a trailing underscore. + + >>> Point = immutable('x, y, id_', name='Point') + >>> p = Point(1, 2, id_=17) + >>> p.set(x=3) + Point(x=3, y=2, id_=17) + >>> p.set(id_=18) + Traceback (most recent call last): + AttributeError: Cannot set frozen members id_ + """ + + if isinstance(members, str): + members = members.replace(',', ' ').split() + + def frozen_member_test(): + frozen_members = ["'%s'" % f for f in members if f.endswith('_')] + if frozen_members: + return """ + frozen_fields = fields_to_modify & set([{frozen_members}]) + if frozen_fields: + raise AttributeError('Cannot set frozen members %s' % ', '.join(frozen_fields)) + """.format(frozen_members=', '.join(frozen_members)) + + return '' + + verbose_string = "" + if sys.version_info < (3, 7): + # Verbose is no longer supported in Python 3.7 + verbose_string = ", verbose={verbose}".format(verbose=verbose) + + quoted_members = ', '.join("'%s'" % m for m in members) + template = """ +class {class_name}(namedtuple('ImmutableBase', [{quoted_members}]{verbose_string})): + __slots__ = tuple() + + def __repr__(self): + return super({class_name}, self).__repr__().replace('ImmutableBase', self.__class__.__name__) + + def set(self, **kwargs): + if not kwargs: + return self + + fields_to_modify = set(kwargs.keys()) + if not fields_to_modify <= {member_set}: + raise AttributeError("'%s' is not a member" % ', '.join(fields_to_modify - {member_set})) + + {frozen_member_test} + + return self.__class__.__new__(self.__class__, *map(kwargs.pop, [{quoted_members}], self)) +""".format(quoted_members=quoted_members, + member_set="set([%s])" % quoted_members if quoted_members else 'set()', + frozen_member_test=frozen_member_test(), + verbose_string=verbose_string, + class_name=name) + + if verbose: + print(template) + + from collections import namedtuple + namespace = dict(namedtuple=namedtuple, __name__='pyrsistent_immutable') + try: + exec(template, namespace) + except SyntaxError as e: + 
raise SyntaxError(str(e) + ':\n' + template) from e + + return namespace[name] diff --git a/src/poetry/core/_vendor/pyrsistent/_pbag.py b/src/poetry/core/_vendor/pyrsistent/_pbag.py new file mode 100644 index 0000000..9cf5840 --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/_pbag.py @@ -0,0 +1,267 @@ +from collections.abc import Container, Iterable, Sized, Hashable +from functools import reduce +from pyrsistent._pmap import pmap + + +def _add_to_counters(counters, element): + return counters.set(element, counters.get(element, 0) + 1) + + +class PBag(object): + """ + A persistent bag/multiset type. + + Requires elements to be hashable, and allows duplicates, but has no + ordering. Bags are hashable. + + Do not instantiate directly, instead use the factory functions :py:func:`b` + or :py:func:`pbag` to create an instance. + + Some examples: + + >>> s = pbag([1, 2, 3, 1]) + >>> s2 = s.add(4) + >>> s3 = s2.remove(1) + >>> s + pbag([1, 1, 2, 3]) + >>> s2 + pbag([1, 1, 2, 3, 4]) + >>> s3 + pbag([1, 2, 3, 4]) + """ + + __slots__ = ('_counts', '__weakref__') + + def __init__(self, counts): + self._counts = counts + + def add(self, element): + """ + Add an element to the bag. + + >>> s = pbag([1]) + >>> s2 = s.add(1) + >>> s3 = s.add(2) + >>> s2 + pbag([1, 1]) + >>> s3 + pbag([1, 2]) + """ + return PBag(_add_to_counters(self._counts, element)) + + def update(self, iterable): + """ + Update bag with all elements in iterable. + + >>> s = pbag([1]) + >>> s.update([1, 2]) + pbag([1, 1, 2]) + """ + if iterable: + return PBag(reduce(_add_to_counters, iterable, self._counts)) + + return self + + def remove(self, element): + """ + Remove an element from the bag. + + >>> s = pbag([1, 1, 2]) + >>> s2 = s.remove(1) + >>> s3 = s.remove(2) + >>> s2 + pbag([1, 2]) + >>> s3 + pbag([1, 1]) + """ + if element not in self._counts: + raise KeyError(element) + elif self._counts[element] == 1: + newc = self._counts.remove(element) + else: + newc = self._counts.set(element, self._counts[element] - 1) + return PBag(newc) + + def count(self, element): + """ + Return the number of times an element appears. + + + >>> pbag([]).count('non-existent') + 0 + >>> pbag([1, 1, 2]).count(1) + 2 + """ + return self._counts.get(element, 0) + + def __len__(self): + """ + Return the length including duplicates. + + >>> len(pbag([1, 1, 2])) + 3 + """ + return sum(self._counts.itervalues()) + + def __iter__(self): + """ + Return an iterator of all elements, including duplicates. + + >>> list(pbag([1, 1, 2])) + [1, 1, 2] + >>> list(pbag([1, 2])) + [1, 2] + """ + for elt, count in self._counts.iteritems(): + for i in range(count): + yield elt + + def __contains__(self, elt): + """ + Check if an element is in the bag. + + >>> 1 in pbag([1, 1, 2]) + True + >>> 0 in pbag([1, 2]) + False + """ + return elt in self._counts + + def __repr__(self): + return "pbag({0})".format(list(self)) + + def __eq__(self, other): + """ + Check if two bags are equivalent, honoring the number of duplicates, + and ignoring insertion order. + + >>> pbag([1, 1, 2]) == pbag([1, 2]) + False + >>> pbag([2, 1, 0]) == pbag([0, 1, 2]) + True + """ + if type(other) is not PBag: + raise TypeError("Can only compare PBag with PBags") + return self._counts == other._counts + + def __lt__(self, other): + raise TypeError('PBags are not orderable') + + __le__ = __lt__ + __gt__ = __lt__ + __ge__ = __lt__ + + # Multiset-style operations similar to collections.Counter + + def __add__(self, other): + """ + Combine elements from two PBags. 
+ + >>> pbag([1, 2, 2]) + pbag([2, 3, 3]) + pbag([1, 2, 2, 2, 3, 3]) + """ + if not isinstance(other, PBag): + return NotImplemented + result = self._counts.evolver() + for elem, other_count in other._counts.iteritems(): + result[elem] = self.count(elem) + other_count + return PBag(result.persistent()) + + def __sub__(self, other): + """ + Remove elements from one PBag that are present in another. + + >>> pbag([1, 2, 2, 2, 3]) - pbag([2, 3, 3, 4]) + pbag([1, 2, 2]) + """ + if not isinstance(other, PBag): + return NotImplemented + result = self._counts.evolver() + for elem, other_count in other._counts.iteritems(): + newcount = self.count(elem) - other_count + if newcount > 0: + result[elem] = newcount + elif elem in self: + result.remove(elem) + return PBag(result.persistent()) + + def __or__(self, other): + """ + Union: Keep elements that are present in either of two PBags. + + >>> pbag([1, 2, 2, 2]) | pbag([2, 3, 3]) + pbag([1, 2, 2, 2, 3, 3]) + """ + if not isinstance(other, PBag): + return NotImplemented + result = self._counts.evolver() + for elem, other_count in other._counts.iteritems(): + count = self.count(elem) + newcount = max(count, other_count) + result[elem] = newcount + return PBag(result.persistent()) + + def __and__(self, other): + """ + Intersection: Only keep elements that are present in both PBags. + + >>> pbag([1, 2, 2, 2]) & pbag([2, 3, 3]) + pbag([2]) + """ + if not isinstance(other, PBag): + return NotImplemented + result = pmap().evolver() + for elem, count in self._counts.iteritems(): + newcount = min(count, other.count(elem)) + if newcount > 0: + result[elem] = newcount + return PBag(result.persistent()) + + def __hash__(self): + """ + Hash based on value of elements. + + >>> m = pmap({pbag([1, 2]): "it's here!"}) + >>> m[pbag([2, 1])] + "it's here!" + >>> pbag([1, 1, 2]) in m + False + """ + return hash(self._counts) + + +Container.register(PBag) +Iterable.register(PBag) +Sized.register(PBag) +Hashable.register(PBag) + + +def b(*elements): + """ + Construct a persistent bag. + + Takes an arbitrary number of arguments to insert into the new persistent + bag. + + >>> b(1, 2, 3, 2) + pbag([1, 2, 2, 3]) + """ + return pbag(elements) + + +def pbag(elements): + """ + Convert an iterable to a persistent bag. + + Takes an iterable with elements to insert. + + >>> pbag([1, 2, 3, 2]) + pbag([1, 2, 2, 3]) + """ + if not elements: + return _EMPTY_PBAG + return PBag(reduce(_add_to_counters, elements, pmap())) + + +_EMPTY_PBAG = PBag(pmap()) + diff --git a/src/poetry/core/_vendor/pyrsistent/_pclass.py b/src/poetry/core/_vendor/pyrsistent/_pclass.py new file mode 100644 index 0000000..fd31a95 --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/_pclass.py @@ -0,0 +1,262 @@ +from pyrsistent._checked_types import (InvariantException, CheckedType, _restore_pickle, store_invariants) +from pyrsistent._field_common import ( + set_fields, check_type, is_field_ignore_extra_complaint, PFIELD_NO_INITIAL, serialize, check_global_invariants +) +from pyrsistent._transformations import transform + + +def _is_pclass(bases): + return len(bases) == 1 and bases[0] == CheckedType + + +class PClassMeta(type): + def __new__(mcs, name, bases, dct): + set_fields(dct, bases, name='_pclass_fields') + store_invariants(dct, bases, '_pclass_invariants', '__invariant__') + dct['__slots__'] = ('_pclass_frozen',) + tuple(key for key in dct['_pclass_fields']) + + # There must only be one __weakref__ entry in the inheritance hierarchy, + # lets put it on the top level class. 
+ if _is_pclass(bases): + dct['__slots__'] += ('__weakref__',) + + return super(PClassMeta, mcs).__new__(mcs, name, bases, dct) + +_MISSING_VALUE = object() + + +def _check_and_set_attr(cls, field, name, value, result, invariant_errors): + check_type(cls, field, name, value) + is_ok, error_code = field.invariant(value) + if not is_ok: + invariant_errors.append(error_code) + else: + setattr(result, name, value) + + +class PClass(CheckedType, metaclass=PClassMeta): + """ + A PClass is a python class with a fixed set of specified fields. PClasses are declared as python classes inheriting + from PClass. It is defined the same way that PRecords are and behaves like a PRecord in all aspects except that it + is not a PMap and hence not a collection but rather a plain Python object. + + + More documentation and examples of PClass usage is available at https://github.com/tobgu/pyrsistent + """ + def __new__(cls, **kwargs): # Support *args? + result = super(PClass, cls).__new__(cls) + factory_fields = kwargs.pop('_factory_fields', None) + ignore_extra = kwargs.pop('ignore_extra', None) + missing_fields = [] + invariant_errors = [] + for name, field in cls._pclass_fields.items(): + if name in kwargs: + if factory_fields is None or name in factory_fields: + if is_field_ignore_extra_complaint(PClass, field, ignore_extra): + value = field.factory(kwargs[name], ignore_extra=ignore_extra) + else: + value = field.factory(kwargs[name]) + else: + value = kwargs[name] + _check_and_set_attr(cls, field, name, value, result, invariant_errors) + del kwargs[name] + elif field.initial is not PFIELD_NO_INITIAL: + initial = field.initial() if callable(field.initial) else field.initial + _check_and_set_attr( + cls, field, name, initial, result, invariant_errors) + elif field.mandatory: + missing_fields.append('{0}.{1}'.format(cls.__name__, name)) + + if invariant_errors or missing_fields: + raise InvariantException(tuple(invariant_errors), tuple(missing_fields), 'Field invariant failed') + + if kwargs: + raise AttributeError("'{0}' are not among the specified fields for {1}".format( + ', '.join(kwargs), cls.__name__)) + + check_global_invariants(result, cls._pclass_invariants) + + result._pclass_frozen = True + return result + + def set(self, *args, **kwargs): + """ + Set a field in the instance. Returns a new instance with the updated value. The original instance remains + unmodified. Accepts key-value pairs or single string representing the field name and a value. + + >>> from pyrsistent import PClass, field + >>> class AClass(PClass): + ... x = field() + ... + >>> a = AClass(x=1) + >>> a2 = a.set(x=2) + >>> a3 = a.set('x', 3) + >>> a + AClass(x=1) + >>> a2 + AClass(x=2) + >>> a3 + AClass(x=3) + """ + if args: + kwargs[args[0]] = args[1] + + factory_fields = set(kwargs) + + for key in self._pclass_fields: + if key not in kwargs: + value = getattr(self, key, _MISSING_VALUE) + if value is not _MISSING_VALUE: + kwargs[key] = value + + return self.__class__(_factory_fields=factory_fields, **kwargs) + + @classmethod + def create(cls, kwargs, _factory_fields=None, ignore_extra=False): + """ + Factory method. Will create a new PClass of the current type and assign the values + specified in kwargs. + + :param ignore_extra: A boolean which when set to True will ignore any keys which appear in kwargs that are not + in the set of fields on the PClass. 
+ """ + if isinstance(kwargs, cls): + return kwargs + + if ignore_extra: + kwargs = {k: kwargs[k] for k in cls._pclass_fields if k in kwargs} + + return cls(_factory_fields=_factory_fields, ignore_extra=ignore_extra, **kwargs) + + def serialize(self, format=None): + """ + Serialize the current PClass using custom serializer functions for fields where + such have been supplied. + """ + result = {} + for name in self._pclass_fields: + value = getattr(self, name, _MISSING_VALUE) + if value is not _MISSING_VALUE: + result[name] = serialize(self._pclass_fields[name].serializer, format, value) + + return result + + def transform(self, *transformations): + """ + Apply transformations to the currency PClass. For more details on transformations see + the documentation for PMap. Transformations on PClasses do not support key matching + since the PClass is not a collection. Apart from that the transformations available + for other persistent types work as expected. + """ + return transform(self, transformations) + + def __eq__(self, other): + if isinstance(other, self.__class__): + for name in self._pclass_fields: + if getattr(self, name, _MISSING_VALUE) != getattr(other, name, _MISSING_VALUE): + return False + + return True + + return NotImplemented + + def __ne__(self, other): + return not self == other + + def __hash__(self): + # May want to optimize this by caching the hash somehow + return hash(tuple((key, getattr(self, key, _MISSING_VALUE)) for key in self._pclass_fields)) + + def __setattr__(self, key, value): + if getattr(self, '_pclass_frozen', False): + raise AttributeError("Can't set attribute, key={0}, value={1}".format(key, value)) + + super(PClass, self).__setattr__(key, value) + + def __delattr__(self, key): + raise AttributeError("Can't delete attribute, key={0}, use remove()".format(key)) + + def _to_dict(self): + result = {} + for key in self._pclass_fields: + value = getattr(self, key, _MISSING_VALUE) + if value is not _MISSING_VALUE: + result[key] = value + + return result + + def __repr__(self): + return "{0}({1})".format(self.__class__.__name__, + ', '.join('{0}={1}'.format(k, repr(v)) for k, v in self._to_dict().items())) + + def __reduce__(self): + # Pickling support + data = dict((key, getattr(self, key)) for key in self._pclass_fields if hasattr(self, key)) + return _restore_pickle, (self.__class__, data,) + + def evolver(self): + """ + Returns an evolver for this object. + """ + return _PClassEvolver(self, self._to_dict()) + + def remove(self, name): + """ + Remove attribute given by name from the current instance. Raises AttributeError if the + attribute doesn't exist. 
+ """ + evolver = self.evolver() + del evolver[name] + return evolver.persistent() + + +class _PClassEvolver(object): + __slots__ = ('_pclass_evolver_original', '_pclass_evolver_data', '_pclass_evolver_data_is_dirty', '_factory_fields') + + def __init__(self, original, initial_dict): + self._pclass_evolver_original = original + self._pclass_evolver_data = initial_dict + self._pclass_evolver_data_is_dirty = False + self._factory_fields = set() + + def __getitem__(self, item): + return self._pclass_evolver_data[item] + + def set(self, key, value): + if self._pclass_evolver_data.get(key, _MISSING_VALUE) is not value: + self._pclass_evolver_data[key] = value + self._factory_fields.add(key) + self._pclass_evolver_data_is_dirty = True + + return self + + def __setitem__(self, key, value): + self.set(key, value) + + def remove(self, item): + if item in self._pclass_evolver_data: + del self._pclass_evolver_data[item] + self._factory_fields.discard(item) + self._pclass_evolver_data_is_dirty = True + return self + + raise AttributeError(item) + + def __delitem__(self, item): + self.remove(item) + + def persistent(self): + if self._pclass_evolver_data_is_dirty: + return self._pclass_evolver_original.__class__(_factory_fields=self._factory_fields, + **self._pclass_evolver_data) + + return self._pclass_evolver_original + + def __setattr__(self, key, value): + if key not in self.__slots__: + self.set(key, value) + else: + super(_PClassEvolver, self).__setattr__(key, value) + + def __getattr__(self, item): + return self[item] diff --git a/src/poetry/core/_vendor/pyrsistent/_pdeque.py b/src/poetry/core/_vendor/pyrsistent/_pdeque.py new file mode 100644 index 0000000..bd11bfa --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/_pdeque.py @@ -0,0 +1,376 @@ +from collections.abc import Sequence, Hashable +from itertools import islice, chain +from numbers import Integral +from pyrsistent._plist import plist + + +class PDeque(object): + """ + Persistent double ended queue (deque). Allows quick appends and pops in both ends. Implemented + using two persistent lists. + + A maximum length can be specified to create a bounded queue. + + Fully supports the Sequence and Hashable protocols including indexing and slicing but + if you need fast random access go for the PVector instead. + + Do not instantiate directly, instead use the factory functions :py:func:`dq` or :py:func:`pdeque` to + create an instance. + + Some examples: + + >>> x = pdeque([1, 2, 3]) + >>> x.left + 1 + >>> x.right + 3 + >>> x[0] == x.left + True + >>> x[-1] == x.right + True + >>> x.pop() + pdeque([1, 2]) + >>> x.pop() == x[:-1] + True + >>> x.popleft() + pdeque([2, 3]) + >>> x.append(4) + pdeque([1, 2, 3, 4]) + >>> x.appendleft(4) + pdeque([4, 1, 2, 3]) + + >>> y = pdeque([1, 2, 3], maxlen=3) + >>> y.append(4) + pdeque([2, 3, 4], maxlen=3) + >>> y.appendleft(4) + pdeque([4, 1, 2], maxlen=3) + """ + __slots__ = ('_left_list', '_right_list', '_length', '_maxlen', '__weakref__') + + def __new__(cls, left_list, right_list, length, maxlen=None): + instance = super(PDeque, cls).__new__(cls) + instance._left_list = left_list + instance._right_list = right_list + instance._length = length + + if maxlen is not None: + if not isinstance(maxlen, Integral): + raise TypeError('An integer is required as maxlen') + + if maxlen < 0: + raise ValueError("maxlen must be non-negative") + + instance._maxlen = maxlen + return instance + + @property + def right(self): + """ + Rightmost element in dqueue. 
+ """ + return PDeque._tip_from_lists(self._right_list, self._left_list) + + @property + def left(self): + """ + Leftmost element in dqueue. + """ + return PDeque._tip_from_lists(self._left_list, self._right_list) + + @staticmethod + def _tip_from_lists(primary_list, secondary_list): + if primary_list: + return primary_list.first + + if secondary_list: + return secondary_list[-1] + + raise IndexError('No elements in empty deque') + + def __iter__(self): + return chain(self._left_list, self._right_list.reverse()) + + def __repr__(self): + return "pdeque({0}{1})".format(list(self), + ', maxlen={0}'.format(self._maxlen) if self._maxlen is not None else '') + __str__ = __repr__ + + @property + def maxlen(self): + """ + Maximum length of the queue. + """ + return self._maxlen + + def pop(self, count=1): + """ + Return new deque with rightmost element removed. Popping the empty queue + will return the empty queue. A optional count can be given to indicate the + number of elements to pop. Popping with a negative index is the same as + popleft. Executes in amortized O(k) where k is the number of elements to pop. + + >>> pdeque([1, 2]).pop() + pdeque([1]) + >>> pdeque([1, 2]).pop(2) + pdeque([]) + >>> pdeque([1, 2]).pop(-1) + pdeque([2]) + """ + if count < 0: + return self.popleft(-count) + + new_right_list, new_left_list = PDeque._pop_lists(self._right_list, self._left_list, count) + return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen) + + def popleft(self, count=1): + """ + Return new deque with leftmost element removed. Otherwise functionally + equivalent to pop(). + + >>> pdeque([1, 2]).popleft() + pdeque([2]) + """ + if count < 0: + return self.pop(-count) + + new_left_list, new_right_list = PDeque._pop_lists(self._left_list, self._right_list, count) + return PDeque(new_left_list, new_right_list, max(self._length - count, 0), self._maxlen) + + @staticmethod + def _pop_lists(primary_list, secondary_list, count): + new_primary_list = primary_list + new_secondary_list = secondary_list + + while count > 0 and (new_primary_list or new_secondary_list): + count -= 1 + if new_primary_list.rest: + new_primary_list = new_primary_list.rest + elif new_primary_list: + new_primary_list = new_secondary_list.reverse() + new_secondary_list = plist() + else: + new_primary_list = new_secondary_list.reverse().rest + new_secondary_list = plist() + + return new_primary_list, new_secondary_list + + def _is_empty(self): + return not self._left_list and not self._right_list + + def __lt__(self, other): + if not isinstance(other, PDeque): + return NotImplemented + + return tuple(self) < tuple(other) + + def __eq__(self, other): + if not isinstance(other, PDeque): + return NotImplemented + + if tuple(self) == tuple(other): + # Sanity check of the length value since it is redundant (there for performance) + assert len(self) == len(other) + return True + + return False + + def __hash__(self): + return hash(tuple(self)) + + def __len__(self): + return self._length + + def append(self, elem): + """ + Return new deque with elem as the rightmost element. + + >>> pdeque([1, 2]).append(3) + pdeque([1, 2, 3]) + """ + new_left_list, new_right_list, new_length = self._append(self._left_list, self._right_list, elem) + return PDeque(new_left_list, new_right_list, new_length, self._maxlen) + + def appendleft(self, elem): + """ + Return new deque with elem as the leftmost element. 
+ + >>> pdeque([1, 2]).appendleft(3) + pdeque([3, 1, 2]) + """ + new_right_list, new_left_list, new_length = self._append(self._right_list, self._left_list, elem) + return PDeque(new_left_list, new_right_list, new_length, self._maxlen) + + def _append(self, primary_list, secondary_list, elem): + if self._maxlen is not None and self._length == self._maxlen: + if self._maxlen == 0: + return primary_list, secondary_list, 0 + new_primary_list, new_secondary_list = PDeque._pop_lists(primary_list, secondary_list, 1) + return new_primary_list, new_secondary_list.cons(elem), self._length + + return primary_list, secondary_list.cons(elem), self._length + 1 + + @staticmethod + def _extend_list(the_list, iterable): + count = 0 + for elem in iterable: + the_list = the_list.cons(elem) + count += 1 + + return the_list, count + + def _extend(self, primary_list, secondary_list, iterable): + new_primary_list, extend_count = PDeque._extend_list(primary_list, iterable) + new_secondary_list = secondary_list + current_len = self._length + extend_count + if self._maxlen is not None and current_len > self._maxlen: + pop_len = current_len - self._maxlen + new_secondary_list, new_primary_list = PDeque._pop_lists(new_secondary_list, new_primary_list, pop_len) + extend_count -= pop_len + + return new_primary_list, new_secondary_list, extend_count + + def extend(self, iterable): + """ + Return new deque with all elements of iterable appended to the right. + + >>> pdeque([1, 2]).extend([3, 4]) + pdeque([1, 2, 3, 4]) + """ + new_right_list, new_left_list, extend_count = self._extend(self._right_list, self._left_list, iterable) + return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen) + + def extendleft(self, iterable): + """ + Return new deque with all elements of iterable appended to the left. + + NB! The elements will be inserted in reverse order compared to the order in the iterable. + + >>> pdeque([1, 2]).extendleft([3, 4]) + pdeque([4, 3, 1, 2]) + """ + new_left_list, new_right_list, extend_count = self._extend(self._left_list, self._right_list, iterable) + return PDeque(new_left_list, new_right_list, self._length + extend_count, self._maxlen) + + def count(self, elem): + """ + Return the number of elements equal to elem present in the queue + + >>> pdeque([1, 2, 1]).count(1) + 2 + """ + return self._left_list.count(elem) + self._right_list.count(elem) + + def remove(self, elem): + """ + Return new deque with first element from left equal to elem removed. If no such element is found + a ValueError is raised. + + >>> pdeque([2, 1, 2]).remove(2) + pdeque([1, 2]) + """ + try: + return PDeque(self._left_list.remove(elem), self._right_list, self._length - 1) + except ValueError: + # Value not found in left list, try the right list + try: + # This is severely inefficient with a double reverse, should perhaps implement a remove_last()? + return PDeque(self._left_list, + self._right_list.reverse().remove(elem).reverse(), self._length - 1) + except ValueError as e: + raise ValueError('{0} not found in PDeque'.format(elem)) from e + + def reverse(self): + """ + Return reversed deque. + + >>> pdeque([1, 2, 3]).reverse() + pdeque([3, 2, 1]) + + Also supports the standard python reverse function. + + >>> reversed(pdeque([1, 2, 3])) + pdeque([3, 2, 1]) + """ + return PDeque(self._right_list, self._left_list, self._length) + __reversed__ = reverse + + def rotate(self, steps): + """ + Return deque with elements rotated steps steps. 
+ + >>> x = pdeque([1, 2, 3]) + >>> x.rotate(1) + pdeque([3, 1, 2]) + >>> x.rotate(-2) + pdeque([3, 1, 2]) + """ + popped_deque = self.pop(steps) + if steps >= 0: + return popped_deque.extendleft(islice(self.reverse(), steps)) + + return popped_deque.extend(islice(self, -steps)) + + def __reduce__(self): + # Pickling support + return pdeque, (list(self), self._maxlen) + + def __getitem__(self, index): + if isinstance(index, slice): + if index.step is not None and index.step != 1: + # Too difficult, no structural sharing possible + return pdeque(tuple(self)[index], maxlen=self._maxlen) + + result = self + if index.start is not None: + result = result.popleft(index.start % self._length) + if index.stop is not None: + result = result.pop(self._length - (index.stop % self._length)) + + return result + + if not isinstance(index, Integral): + raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__) + + if index >= 0: + return self.popleft(index).left + + shifted = len(self) + index + if shifted < 0: + raise IndexError( + "pdeque index {0} out of range {1}".format(index, len(self)), + ) + return self.popleft(shifted).left + + index = Sequence.index + +Sequence.register(PDeque) +Hashable.register(PDeque) + + +def pdeque(iterable=(), maxlen=None): + """ + Return deque containing the elements of iterable. If maxlen is specified then + len(iterable) - maxlen elements are discarded from the left to if len(iterable) > maxlen. + + >>> pdeque([1, 2, 3]) + pdeque([1, 2, 3]) + >>> pdeque([1, 2, 3, 4], maxlen=2) + pdeque([3, 4], maxlen=2) + """ + t = tuple(iterable) + if maxlen is not None: + t = t[-maxlen:] + length = len(t) + pivot = int(length / 2) + left = plist(t[:pivot]) + right = plist(t[pivot:], reverse=True) + return PDeque(left, right, length, maxlen) + +def dq(*elements): + """ + Return deque containing all arguments. + + >>> dq(1, 2, 3) + pdeque([1, 2, 3]) + """ + return pdeque(elements) diff --git a/src/poetry/core/_vendor/pyrsistent/_plist.py b/src/poetry/core/_vendor/pyrsistent/_plist.py new file mode 100644 index 0000000..bea7f5e --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/_plist.py @@ -0,0 +1,313 @@ +from collections.abc import Sequence, Hashable +from numbers import Integral +from functools import reduce + + +class _PListBuilder(object): + """ + Helper class to allow construction of a list without + having to reverse it in the end. + """ + __slots__ = ('_head', '_tail') + + def __init__(self): + self._head = _EMPTY_PLIST + self._tail = _EMPTY_PLIST + + def _append(self, elem, constructor): + if not self._tail: + self._head = constructor(elem) + self._tail = self._head + else: + self._tail.rest = constructor(elem) + self._tail = self._tail.rest + + return self._head + + def append_elem(self, elem): + return self._append(elem, lambda e: PList(e, _EMPTY_PLIST)) + + def append_plist(self, pl): + return self._append(pl, lambda l: l) + + def build(self): + return self._head + + +class _PListBase(object): + __slots__ = ('__weakref__',) + + # Selected implementations can be taken straight from the Sequence + # class, other are less suitable. Especially those that work with + # index lookups. + count = Sequence.count + index = Sequence.index + + def __reduce__(self): + # Pickling support + return plist, (list(self),) + + def __len__(self): + """ + Return the length of the list, computed by traversing it. 
+ + This is obviously O(n) but with the current implementation + where a list is also a node the overhead of storing the length + in every node would be quite significant. + """ + return sum(1 for _ in self) + + def __repr__(self): + return "plist({0})".format(list(self)) + __str__ = __repr__ + + def cons(self, elem): + """ + Return a new list with elem inserted as new head. + + >>> plist([1, 2]).cons(3) + plist([3, 1, 2]) + """ + return PList(elem, self) + + def mcons(self, iterable): + """ + Return a new list with all elements of iterable repeatedly cons:ed to the current list. + NB! The elements will be inserted in the reverse order of the iterable. + Runs in O(len(iterable)). + + >>> plist([1, 2]).mcons([3, 4]) + plist([4, 3, 1, 2]) + """ + head = self + for elem in iterable: + head = head.cons(elem) + + return head + + def reverse(self): + """ + Return a reversed version of list. Runs in O(n) where n is the length of the list. + + >>> plist([1, 2, 3]).reverse() + plist([3, 2, 1]) + + Also supports the standard reversed function. + + >>> reversed(plist([1, 2, 3])) + plist([3, 2, 1]) + """ + result = plist() + head = self + while head: + result = result.cons(head.first) + head = head.rest + + return result + __reversed__ = reverse + + def split(self, index): + """ + Spilt the list at position specified by index. Returns a tuple containing the + list up until index and the list after the index. Runs in O(index). + + >>> plist([1, 2, 3, 4]).split(2) + (plist([1, 2]), plist([3, 4])) + """ + lb = _PListBuilder() + right_list = self + i = 0 + while right_list and i < index: + lb.append_elem(right_list.first) + right_list = right_list.rest + i += 1 + + if not right_list: + # Just a small optimization in the cases where no split occurred + return self, _EMPTY_PLIST + + return lb.build(), right_list + + def __iter__(self): + li = self + while li: + yield li.first + li = li.rest + + def __lt__(self, other): + if not isinstance(other, _PListBase): + return NotImplemented + + return tuple(self) < tuple(other) + + def __eq__(self, other): + """ + Traverses the lists, checking equality of elements. + + This is an O(n) operation, but preserves the standard semantics of list equality. + """ + if not isinstance(other, _PListBase): + return NotImplemented + + self_head = self + other_head = other + while self_head and other_head: + if not self_head.first == other_head.first: + return False + self_head = self_head.rest + other_head = other_head.rest + + return not self_head and not other_head + + def __getitem__(self, index): + # Don't use this this data structure if you plan to do a lot of indexing, it is + # very inefficient! Use a PVector instead! + + if isinstance(index, slice): + if index.start is not None and index.stop is None and (index.step is None or index.step == 1): + return self._drop(index.start) + + # Take the easy way out for all other slicing cases, not much structural reuse possible anyway + return plist(tuple(self)[index]) + + if not isinstance(index, Integral): + raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__) + + if index < 0: + # NB: O(n)! 
+ index += len(self) + + try: + return self._drop(index).first + except AttributeError as e: + raise IndexError("PList index out of range") from e + + def _drop(self, count): + if count < 0: + raise IndexError("PList index out of range") + + head = self + while count > 0: + head = head.rest + count -= 1 + + return head + + def __hash__(self): + return hash(tuple(self)) + + def remove(self, elem): + """ + Return new list with first element equal to elem removed. O(k) where k is the position + of the element that is removed. + + Raises ValueError if no matching element is found. + + >>> plist([1, 2, 1]).remove(1) + plist([2, 1]) + """ + + builder = _PListBuilder() + head = self + while head: + if head.first == elem: + return builder.append_plist(head.rest) + + builder.append_elem(head.first) + head = head.rest + + raise ValueError('{0} not found in PList'.format(elem)) + + +class PList(_PListBase): + """ + Classical Lisp style singly linked list. Adding elements to the head using cons is O(1). + Element access is O(k) where k is the position of the element in the list. Taking the + length of the list is O(n). + + Fully supports the Sequence and Hashable protocols including indexing and slicing but + if you need fast random access go for the PVector instead. + + Do not instantiate directly, instead use the factory functions :py:func:`l` or :py:func:`plist` to + create an instance. + + Some examples: + + >>> x = plist([1, 2]) + >>> y = x.cons(3) + >>> x + plist([1, 2]) + >>> y + plist([3, 1, 2]) + >>> y.first + 3 + >>> y.rest == x + True + >>> y[:2] + plist([3, 1]) + """ + __slots__ = ('first', 'rest') + + def __new__(cls, first, rest): + instance = super(PList, cls).__new__(cls) + instance.first = first + instance.rest = rest + return instance + + def __bool__(self): + return True + __nonzero__ = __bool__ + + +Sequence.register(PList) +Hashable.register(PList) + + +class _EmptyPList(_PListBase): + __slots__ = () + + def __bool__(self): + return False + __nonzero__ = __bool__ + + @property + def first(self): + raise AttributeError("Empty PList has no first") + + @property + def rest(self): + return self + + +Sequence.register(_EmptyPList) +Hashable.register(_EmptyPList) + +_EMPTY_PLIST = _EmptyPList() + + +def plist(iterable=(), reverse=False): + """ + Creates a new persistent list containing all elements of iterable. + Optional parameter reverse specifies if the elements should be inserted in + reverse order or not. + + >>> plist([1, 2, 3]) + plist([1, 2, 3]) + >>> plist([1, 2, 3], reverse=True) + plist([3, 2, 1]) + """ + if not reverse: + iterable = list(iterable) + iterable.reverse() + + return reduce(lambda pl, elem: pl.cons(elem), iterable, _EMPTY_PLIST) + + +def l(*elements): + """ + Creates a new persistent list containing all arguments. + + >>> l(1, 2, 3) + plist([1, 2, 3]) + """ + return plist(elements) diff --git a/src/poetry/core/_vendor/pyrsistent/_pmap.py b/src/poetry/core/_vendor/pyrsistent/_pmap.py new file mode 100644 index 0000000..c6c7c7f --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/_pmap.py @@ -0,0 +1,576 @@ +from collections.abc import Mapping, Hashable +from itertools import chain +from pyrsistent._pvector import pvector +from pyrsistent._transformations import transform + +class PMapView: + """View type for the persistent map/dict type `PMap`. + + Provides an equivalent of Python's built-in `dict_values` and `dict_items` + types that result from expreessions such as `{}.values()` and + `{}.items()`. 
The equivalent for `{}.keys()` is absent because the keys are + instead represented by a `PSet` object, which can be created in `O(1)` time. + + The `PMapView` class is overloaded by the `PMapValues` and `PMapItems` + classes which handle the specific case of values and items, respectively + + Parameters + ---------- + m : mapping + The mapping/dict-like object of which a view is to be created. This + should generally be a `PMap` object. + """ + # The public methods that use the above. + def __init__(self, m): + # Make sure this is a persistnt map + if not isinstance(m, PMap): + # We can convert mapping objects into pmap objects, I guess (but why?) + if isinstance(m, Mapping): + m = pmap(m) + else: + raise TypeError("PViewMap requires a Mapping object") + object.__setattr__(self, '_map', m) + + def __len__(self): + return len(self._map) + + def __setattr__(self, k, v): + raise TypeError("%s is immutable" % (type(self),)) + + def __reversed__(self): + raise TypeError("Persistent maps are not reversible") + +class PMapValues(PMapView): + """View type for the values of the persistent map/dict type `PMap`. + + Provides an equivalent of Python's built-in `dict_values` type that result + from expreessions such as `{}.values()`. See also `PMapView`. + + Parameters + ---------- + m : mapping + The mapping/dict-like object of which a view is to be created. This + should generally be a `PMap` object. + """ + def __iter__(self): + return self._map.itervalues() + + def __contains__(self, arg): + return arg in self._map.itervalues() + + # The str and repr methods imitate the dict_view style currently. + def __str__(self): + return f"pmap_values({list(iter(self))})" + + def __repr__(self): + return f"pmap_values({list(iter(self))})" + + def __eq__(self, x): + # For whatever reason, dict_values always seem to return False for == + # (probably it's not implemented), so we mimic that. + if x is self: return True + else: return False + +class PMapItems(PMapView): + """View type for the items of the persistent map/dict type `PMap`. + + Provides an equivalent of Python's built-in `dict_items` type that result + from expreessions such as `{}.items()`. See also `PMapView`. + + Parameters + ---------- + m : mapping + The mapping/dict-like object of which a view is to be created. This + should generally be a `PMap` object. + """ + def __iter__(self): + return self._map.iteritems() + + def __contains__(self, arg): + try: (k,v) = arg + except Exception: return False + return k in self._map and self._map[k] == v + + # The str and repr methods mitate the dict_view style currently. + def __str__(self): + return f"pmap_items({list(iter(self))})" + + def __repr__(self): + return f"pmap_items({list(iter(self))})" + + def __eq__(self, x): + if x is self: return True + elif not isinstance(x, type(self)): return False + else: return self._map == x._map + +class PMap(object): + """ + Persistent map/dict. Tries to follow the same naming conventions as the built in dict where feasible. + + Do not instantiate directly, instead use the factory functions :py:func:`m` or :py:func:`pmap` to + create an instance. + + Was originally written as a very close copy of the Clojure equivalent but was later rewritten to closer + re-assemble the python dict. This means that a sparse vector (a PVector) of buckets is used. The keys are + hashed and the elements inserted at position hash % len(bucket_vector). Whenever the map size exceeds 2/3 of + the containing vectors size the map is reallocated to a vector of double the size. 
This is done to avoid + excessive hash collisions. + + This structure corresponds most closely to the built in dict type and is intended as a replacement. Where the + semantics are the same (more or less) the same function names have been used but for some cases it is not possible, + for example assignments and deletion of values. + + PMap implements the Mapping protocol and is Hashable. It also supports dot-notation for + element access. + + Random access and insert is log32(n) where n is the size of the map. + + The following are examples of some common operations on persistent maps + + >>> m1 = m(a=1, b=3) + >>> m2 = m1.set('c', 3) + >>> m3 = m2.remove('a') + >>> m1 == {'a': 1, 'b': 3} + True + >>> m2 == {'a': 1, 'b': 3, 'c': 3} + True + >>> m3 == {'b': 3, 'c': 3} + True + >>> m3['c'] + 3 + >>> m3.c + 3 + """ + __slots__ = ('_size', '_buckets', '__weakref__', '_cached_hash') + + def __new__(cls, size, buckets): + self = super(PMap, cls).__new__(cls) + self._size = size + self._buckets = buckets + return self + + @staticmethod + def _get_bucket(buckets, key): + index = hash(key) % len(buckets) + bucket = buckets[index] + return index, bucket + + @staticmethod + def _getitem(buckets, key): + _, bucket = PMap._get_bucket(buckets, key) + if bucket: + for k, v in bucket: + if k == key: + return v + + raise KeyError(key) + + def __getitem__(self, key): + return PMap._getitem(self._buckets, key) + + @staticmethod + def _contains(buckets, key): + _, bucket = PMap._get_bucket(buckets, key) + if bucket: + for k, _ in bucket: + if k == key: + return True + + return False + + return False + + def __contains__(self, key): + return self._contains(self._buckets, key) + + get = Mapping.get + + def __iter__(self): + return self.iterkeys() + + # If this method is not defined, then reversed(pmap) will attempt to reverse + # the map using len() and getitem, usually resulting in a mysterious + # KeyError. 
+ def __reversed__(self): + raise TypeError("Persistent maps are not reversible") + + def __getattr__(self, key): + try: + return self[key] + except KeyError as e: + raise AttributeError( + "{0} has no attribute '{1}'".format(type(self).__name__, key) + ) from e + + def iterkeys(self): + for k, _ in self.iteritems(): + yield k + + # These are more efficient implementations compared to the original + # methods that are based on the keys iterator and then calls the + # accessor functions to access the value for the corresponding key + def itervalues(self): + for _, v in self.iteritems(): + yield v + + def iteritems(self): + for bucket in self._buckets: + if bucket: + for k, v in bucket: + yield k, v + + def values(self): + return PMapValues(self) + + def keys(self): + from ._pset import PSet + return PSet(self) + + def items(self): + return PMapItems(self) + + def __len__(self): + return self._size + + def __repr__(self): + return 'pmap({0})'.format(str(dict(self))) + + def __eq__(self, other): + if self is other: + return True + if not isinstance(other, Mapping): + return NotImplemented + if len(self) != len(other): + return False + if isinstance(other, PMap): + if (hasattr(self, '_cached_hash') and hasattr(other, '_cached_hash') + and self._cached_hash != other._cached_hash): + return False + if self._buckets == other._buckets: + return True + return dict(self.iteritems()) == dict(other.iteritems()) + elif isinstance(other, dict): + return dict(self.iteritems()) == other + return dict(self.iteritems()) == dict(other.items()) + + __ne__ = Mapping.__ne__ + + def __lt__(self, other): + raise TypeError('PMaps are not orderable') + + __le__ = __lt__ + __gt__ = __lt__ + __ge__ = __lt__ + + def __str__(self): + return self.__repr__() + + def __hash__(self): + if not hasattr(self, '_cached_hash'): + self._cached_hash = hash(frozenset(self.iteritems())) + return self._cached_hash + + def set(self, key, val): + """ + Return a new PMap with key and val inserted. + + >>> m1 = m(a=1, b=2) + >>> m2 = m1.set('a', 3) + >>> m3 = m1.set('c' ,4) + >>> m1 == {'a': 1, 'b': 2} + True + >>> m2 == {'a': 3, 'b': 2} + True + >>> m3 == {'a': 1, 'b': 2, 'c': 4} + True + """ + return self.evolver().set(key, val).persistent() + + def remove(self, key): + """ + Return a new PMap without the element specified by key. Raises KeyError if the element + is not present. + + >>> m1 = m(a=1, b=2) + >>> m1.remove('a') + pmap({'b': 2}) + """ + return self.evolver().remove(key).persistent() + + def discard(self, key): + """ + Return a new PMap without the element specified by key. Returns reference to itself + if element is not present. + + >>> m1 = m(a=1, b=2) + >>> m1.discard('a') + pmap({'b': 2}) + >>> m1 is m1.discard('c') + True + """ + try: + return self.remove(key) + except KeyError: + return self + + def update(self, *maps): + """ + Return a new PMap with the items in Mappings inserted. If the same key is present in multiple + maps the rightmost (last) value is inserted. + + >>> m1 = m(a=1, b=2) + >>> m1.update(m(a=2, c=3), {'a': 17, 'd': 35}) == {'a': 17, 'b': 2, 'c': 3, 'd': 35} + True + """ + return self.update_with(lambda l, r: r, *maps) + + def update_with(self, update_fn, *maps): + """ + Return a new PMap with the items in Mappings maps inserted. If the same key is present in multiple + maps the values will be merged using merge_fn going from left to right. 
+ + >>> from operator import add + >>> m1 = m(a=1, b=2) + >>> m1.update_with(add, m(a=2)) == {'a': 3, 'b': 2} + True + + The reverse behaviour of the regular merge. Keep the leftmost element instead of the rightmost. + + >>> m1 = m(a=1) + >>> m1.update_with(lambda l, r: l, m(a=2), {'a':3}) + pmap({'a': 1}) + """ + evolver = self.evolver() + for map in maps: + for key, value in map.items(): + evolver.set(key, update_fn(evolver[key], value) if key in evolver else value) + + return evolver.persistent() + + def __add__(self, other): + return self.update(other) + + __or__ = __add__ + + def __reduce__(self): + # Pickling support + return pmap, (dict(self),) + + def transform(self, *transformations): + """ + Transform arbitrarily complex combinations of PVectors and PMaps. A transformation + consists of two parts. One match expression that specifies which elements to transform + and one transformation function that performs the actual transformation. + + >>> from pyrsistent import freeze, ny + >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'}, + ... {'author': 'Steve', 'content': 'A slightly longer article'}], + ... 'weather': {'temperature': '11C', 'wind': '5m/s'}}) + >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c) + >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c) + >>> very_short_news.articles[0].content + 'A short article' + >>> very_short_news.articles[1].content + 'A slightly long...' + + When nothing has been transformed the original data structure is kept + + >>> short_news is news_paper + True + >>> very_short_news is news_paper + False + >>> very_short_news.articles[0] is news_paper.articles[0] + True + """ + return transform(self, transformations) + + def copy(self): + return self + + class _Evolver(object): + __slots__ = ('_buckets_evolver', '_size', '_original_pmap') + + def __init__(self, original_pmap): + self._original_pmap = original_pmap + self._buckets_evolver = original_pmap._buckets.evolver() + self._size = original_pmap._size + + def __getitem__(self, key): + return PMap._getitem(self._buckets_evolver, key) + + def __setitem__(self, key, val): + self.set(key, val) + + def set(self, key, val): + kv = (key, val) + index, bucket = PMap._get_bucket(self._buckets_evolver, key) + reallocation_required = len(self._buckets_evolver) < 0.67 * self._size + if bucket: + for k, v in bucket: + if k == key: + if v is not val: + new_bucket = [(k2, v2) if k2 != k else (k2, val) for k2, v2 in bucket] + self._buckets_evolver[index] = new_bucket + + return self + + # Only check and perform reallocation if not replacing an existing value. + # This is a performance tweak, see #247. 
+ if reallocation_required: + self._reallocate() + return self.set(key, val) + + new_bucket = [kv] + new_bucket.extend(bucket) + self._buckets_evolver[index] = new_bucket + self._size += 1 + else: + if reallocation_required: + self._reallocate() + return self.set(key, val) + + self._buckets_evolver[index] = [kv] + self._size += 1 + + return self + + def _reallocate(self): + new_size = 2 * len(self._buckets_evolver) + new_list = new_size * [None] + buckets = self._buckets_evolver.persistent() + for k, v in chain.from_iterable(x for x in buckets if x): + index = hash(k) % new_size + if new_list[index]: + new_list[index].append((k, v)) + else: + new_list[index] = [(k, v)] + + # A reallocation should always result in a dirty buckets evolver to avoid + # possible loss of elements when doing the reallocation. + self._buckets_evolver = pvector().evolver() + self._buckets_evolver.extend(new_list) + + def is_dirty(self): + return self._buckets_evolver.is_dirty() + + def persistent(self): + if self.is_dirty(): + self._original_pmap = PMap(self._size, self._buckets_evolver.persistent()) + + return self._original_pmap + + def __len__(self): + return self._size + + def __contains__(self, key): + return PMap._contains(self._buckets_evolver, key) + + def __delitem__(self, key): + self.remove(key) + + def remove(self, key): + index, bucket = PMap._get_bucket(self._buckets_evolver, key) + + if bucket: + new_bucket = [(k, v) for (k, v) in bucket if k != key] + if len(bucket) > len(new_bucket): + self._buckets_evolver[index] = new_bucket if new_bucket else None + self._size -= 1 + return self + + raise KeyError('{0}'.format(key)) + + def evolver(self): + """ + Create a new evolver for this pmap. For a discussion on evolvers in general see the + documentation for the pvector evolver. + + Create the evolver and perform various mutating updates to it: + + >>> m1 = m(a=1, b=2) + >>> e = m1.evolver() + >>> e['c'] = 3 + >>> len(e) + 3 + >>> del e['a'] + + The underlying pmap remains the same: + + >>> m1 == {'a': 1, 'b': 2} + True + + The changes are kept in the evolver. An updated pmap can be created using the + persistent() function on the evolver. + + >>> m2 = e.persistent() + >>> m2 == {'b': 2, 'c': 3} + True + + The new pmap will share data with the original pmap in the same way that would have + been done if only using operations on the pmap. + """ + return self._Evolver(self) + +Mapping.register(PMap) +Hashable.register(PMap) + + +def _turbo_mapping(initial, pre_size): + if pre_size: + size = pre_size + else: + try: + size = 2 * len(initial) or 8 + except Exception: + # Guess we can't figure out the length. Give up on length hinting, + # we can always reallocate later. + size = 8 + + buckets = size * [None] + + if not isinstance(initial, Mapping): + # Make a dictionary of the initial data if it isn't already, + # that will save us some job further down since we can assume no + # key collisions + initial = dict(initial) + + for k, v in initial.items(): + h = hash(k) + index = h % size + bucket = buckets[index] + + if bucket: + bucket.append((k, v)) + else: + buckets[index] = [(k, v)] + + return PMap(len(initial), pvector().extend(buckets)) + + +_EMPTY_PMAP = _turbo_mapping({}, 0) + + +def pmap(initial={}, pre_size=0): + """ + Create new persistent map, inserts all elements in initial into the newly created map. + The optional argument pre_size may be used to specify an initial size of the underlying bucket vector. 
This + may have a positive performance impact in the cases where you know beforehand that a large number of elements + will be inserted into the map eventually since it will reduce the number of reallocations required. + + >>> pmap({'a': 13, 'b': 14}) == {'a': 13, 'b': 14} + True + """ + if not initial and pre_size == 0: + return _EMPTY_PMAP + + return _turbo_mapping(initial, pre_size) + + +def m(**kwargs): + """ + Creates a new persistent map. Inserts all key value arguments into the newly created map. + + >>> m(a=13, b=14) == {'a': 13, 'b': 14} + True + """ + return pmap(kwargs) diff --git a/src/poetry/core/_vendor/pyrsistent/_precord.py b/src/poetry/core/_vendor/pyrsistent/_precord.py new file mode 100644 index 0000000..1ee8198 --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/_precord.py @@ -0,0 +1,167 @@ +from pyrsistent._checked_types import CheckedType, _restore_pickle, InvariantException, store_invariants +from pyrsistent._field_common import ( + set_fields, check_type, is_field_ignore_extra_complaint, PFIELD_NO_INITIAL, serialize, check_global_invariants +) +from pyrsistent._pmap import PMap, pmap + + +class _PRecordMeta(type): + def __new__(mcs, name, bases, dct): + set_fields(dct, bases, name='_precord_fields') + store_invariants(dct, bases, '_precord_invariants', '__invariant__') + + dct['_precord_mandatory_fields'] = \ + set(name for name, field in dct['_precord_fields'].items() if field.mandatory) + + dct['_precord_initial_values'] = \ + dict((k, field.initial) for k, field in dct['_precord_fields'].items() if field.initial is not PFIELD_NO_INITIAL) + + + dct['__slots__'] = () + + return super(_PRecordMeta, mcs).__new__(mcs, name, bases, dct) + + +class PRecord(PMap, CheckedType, metaclass=_PRecordMeta): + """ + A PRecord is a PMap with a fixed set of specified fields. Records are declared as python classes inheriting + from PRecord. Because it is a PMap it has full support for all Mapping methods such as iteration and element + access using subscript notation. + + More documentation and examples of PRecord usage is available at https://github.com/tobgu/pyrsistent + """ + def __new__(cls, **kwargs): + # Hack total! If these two special attributes exist that means we can create + # ourselves. Otherwise we need to go through the Evolver to create the structures + # for us. + if '_precord_size' in kwargs and '_precord_buckets' in kwargs: + return super(PRecord, cls).__new__(cls, kwargs['_precord_size'], kwargs['_precord_buckets']) + + factory_fields = kwargs.pop('_factory_fields', None) + ignore_extra = kwargs.pop('_ignore_extra', False) + + initial_values = kwargs + if cls._precord_initial_values: + initial_values = dict((k, v() if callable(v) else v) + for k, v in cls._precord_initial_values.items()) + initial_values.update(kwargs) + + e = _PRecordEvolver(cls, pmap(pre_size=len(cls._precord_fields)), _factory_fields=factory_fields, _ignore_extra=ignore_extra) + for k, v in initial_values.items(): + e[k] = v + + return e.persistent() + + def set(self, *args, **kwargs): + """ + Set a field in the record. This set function differs slightly from that in the PMap + class. First of all it accepts key-value pairs. Second it accepts multiple key-value + pairs to perform one, atomic, update of multiple fields. + """ + + # The PRecord set() can accept kwargs since all fields that have been declared are + # valid python identifiers. Also allow multiple fields to be set in one operation. 
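+ # e.g. record.set('x', 1) mirrors PMap.set, while record.set(x=1, y=2) updates + # several declared fields in a single atomic operation.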
+ if args: + return super(PRecord, self).set(args[0], args[1]) + + return self.update(kwargs) + + def evolver(self): + """ + Returns an evolver of this object. + """ + return _PRecordEvolver(self.__class__, self) + + def __repr__(self): + return "{0}({1})".format(self.__class__.__name__, + ', '.join('{0}={1}'.format(k, repr(v)) for k, v in self.items())) + + @classmethod + def create(cls, kwargs, _factory_fields=None, ignore_extra=False): + """ + Factory method. Will create a new PRecord of the current type and assign the values + specified in kwargs. + + :param ignore_extra: A boolean which when set to True will ignore any keys which appear in kwargs that are not + in the set of fields on the PRecord. + """ + if isinstance(kwargs, cls): + return kwargs + + if ignore_extra: + kwargs = {k: kwargs[k] for k in cls._precord_fields if k in kwargs} + + return cls(_factory_fields=_factory_fields, _ignore_extra=ignore_extra, **kwargs) + + def __reduce__(self): + # Pickling support + return _restore_pickle, (self.__class__, dict(self),) + + def serialize(self, format=None): + """ + Serialize the current PRecord using custom serializer functions for fields where + such have been supplied. + """ + return dict((k, serialize(self._precord_fields[k].serializer, format, v)) for k, v in self.items()) + + +class _PRecordEvolver(PMap._Evolver): + __slots__ = ('_destination_cls', '_invariant_error_codes', '_missing_fields', '_factory_fields', '_ignore_extra') + + def __init__(self, cls, original_pmap, _factory_fields=None, _ignore_extra=False): + super(_PRecordEvolver, self).__init__(original_pmap) + self._destination_cls = cls + self._invariant_error_codes = [] + self._missing_fields = [] + self._factory_fields = _factory_fields + self._ignore_extra = _ignore_extra + + def __setitem__(self, key, original_value): + self.set(key, original_value) + + def set(self, key, original_value): + field = self._destination_cls._precord_fields.get(key) + if field: + if self._factory_fields is None or field in self._factory_fields: + try: + if is_field_ignore_extra_complaint(PRecord, field, self._ignore_extra): + value = field.factory(original_value, ignore_extra=self._ignore_extra) + else: + value = field.factory(original_value) + except InvariantException as e: + self._invariant_error_codes += e.invariant_errors + self._missing_fields += e.missing_fields + return self + else: + value = original_value + + check_type(self._destination_cls, field, key, value) + + is_ok, error_code = field.invariant(value) + if not is_ok: + self._invariant_error_codes.append(error_code) + + return super(_PRecordEvolver, self).set(key, value) + else: + raise AttributeError("'{0}' is not among the specified fields for {1}".format(key, self._destination_cls.__name__)) + + def persistent(self): + cls = self._destination_cls + is_dirty = self.is_dirty() + pm = super(_PRecordEvolver, self).persistent() + if is_dirty or not isinstance(pm, cls): + result = cls(_precord_buckets=pm._buckets, _precord_size=pm._size) + else: + result = pm + + if cls._precord_mandatory_fields: + self._missing_fields += tuple('{0}.{1}'.format(cls.__name__, f) for f + in (cls._precord_mandatory_fields - set(result.keys()))) + + if self._invariant_error_codes or self._missing_fields: + raise InvariantException(tuple(self._invariant_error_codes), tuple(self._missing_fields), + 'Field invariant failed') + + check_global_invariants(result, cls._precord_invariants) + + return result diff --git a/src/poetry/core/_vendor/pyrsistent/_pset.py 
b/src/poetry/core/_vendor/pyrsistent/_pset.py new file mode 100644 index 0000000..4fae827 --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/_pset.py @@ -0,0 +1,227 @@ +from collections.abc import Set, Hashable +import sys +from pyrsistent._pmap import pmap + + +class PSet(object): + """ + Persistent set implementation. Built on top of the persistent map. The set supports all operations + in the Set protocol and is Hashable. + + Do not instantiate directly, instead use the factory functions :py:func:`s` or :py:func:`pset` + to create an instance. + + Random access and insert is log32(n) where n is the size of the set. + + Some examples: + + >>> s = pset([1, 2, 3, 1]) + >>> s2 = s.add(4) + >>> s3 = s2.remove(2) + >>> s + pset([1, 2, 3]) + >>> s2 + pset([1, 2, 3, 4]) + >>> s3 + pset([1, 3, 4]) + """ + __slots__ = ('_map', '__weakref__') + + def __new__(cls, m): + self = super(PSet, cls).__new__(cls) + self._map = m + return self + + def __contains__(self, element): + return element in self._map + + def __iter__(self): + return iter(self._map) + + def __len__(self): + return len(self._map) + + def __repr__(self): + if not self: + return 'p' + str(set(self)) + + return 'pset([{0}])'.format(str(set(self))[1:-1]) + + def __str__(self): + return self.__repr__() + + def __hash__(self): + return hash(self._map) + + def __reduce__(self): + # Pickling support + return pset, (list(self),) + + @classmethod + def _from_iterable(cls, it, pre_size=8): + return PSet(pmap(dict((k, True) for k in it), pre_size=pre_size)) + + def add(self, element): + """ + Return a new PSet with element added + + >>> s1 = s(1, 2) + >>> s1.add(3) + pset([1, 2, 3]) + """ + return self.evolver().add(element).persistent() + + def update(self, iterable): + """ + Return a new PSet with elements in iterable added + + >>> s1 = s(1, 2) + >>> s1.update([3, 4, 4]) + pset([1, 2, 3, 4]) + """ + e = self.evolver() + for element in iterable: + e.add(element) + + return e.persistent() + + def remove(self, element): + """ + Return a new PSet with element removed. Raises KeyError if element is not present. + + >>> s1 = s(1, 2) + >>> s1.remove(2) + pset([1]) + """ + if element in self._map: + return self.evolver().remove(element).persistent() + + raise KeyError("Element '%s' not present in PSet" % repr(element)) + + def discard(self, element): + """ + Return a new PSet with element removed. Returns itself if element is not present. + """ + if element in self._map: + return self.evolver().remove(element).persistent() + + return self + + class _Evolver(object): + __slots__ = ('_original_pset', '_pmap_evolver') + + def __init__(self, original_pset): + self._original_pset = original_pset + self._pmap_evolver = original_pset._map.evolver() + + def add(self, element): + self._pmap_evolver[element] = True + return self + + def remove(self, element): + del self._pmap_evolver[element] + return self + + def is_dirty(self): + return self._pmap_evolver.is_dirty() + + def persistent(self): + if not self.is_dirty(): + return self._original_pset + + return PSet(self._pmap_evolver.persistent()) + + def __len__(self): + return len(self._pmap_evolver) + + def copy(self): + return self + + def evolver(self): + """ + Create a new evolver for this pset. For a discussion on evolvers in general see the + documentation for the pvector evolver. 
+ + Create the evolver and perform various mutating updates to it: + + >>> s1 = s(1, 2, 3) + >>> e = s1.evolver() + >>> _ = e.add(4) + >>> len(e) + 4 + >>> _ = e.remove(1) + + The underlying pset remains the same: + + >>> s1 + pset([1, 2, 3]) + + The changes are kept in the evolver. An updated pmap can be created using the + persistent() function on the evolver. + + >>> s2 = e.persistent() + >>> s2 + pset([2, 3, 4]) + + The new pset will share data with the original pset in the same way that would have + been done if only using operations on the pset. + """ + return PSet._Evolver(self) + + # All the operations and comparisons you would expect on a set. + # + # This is not very beautiful. If we avoid inheriting from PSet we can use the + # __slots__ concepts (which requires a new style class) and hopefully save some memory. + __le__ = Set.__le__ + __lt__ = Set.__lt__ + __gt__ = Set.__gt__ + __ge__ = Set.__ge__ + __eq__ = Set.__eq__ + __ne__ = Set.__ne__ + + __and__ = Set.__and__ + __or__ = Set.__or__ + __sub__ = Set.__sub__ + __xor__ = Set.__xor__ + + issubset = __le__ + issuperset = __ge__ + union = __or__ + intersection = __and__ + difference = __sub__ + symmetric_difference = __xor__ + + isdisjoint = Set.isdisjoint + +Set.register(PSet) +Hashable.register(PSet) + +_EMPTY_PSET = PSet(pmap()) + + +def pset(iterable=(), pre_size=8): + """ + Creates a persistent set from iterable. Optionally takes a sizing parameter equivalent to that + used for :py:func:`pmap`. + + >>> s1 = pset([1, 2, 3, 2]) + >>> s1 + pset([1, 2, 3]) + """ + if not iterable: + return _EMPTY_PSET + + return PSet._from_iterable(iterable, pre_size=pre_size) + + +def s(*elements): + """ + Create a persistent set. + + Takes an arbitrary number of arguments to insert into the new set. + + >>> s1 = s(1, 2, 3, 2) + >>> s1 + pset([1, 2, 3]) + """ + return pset(elements) diff --git a/src/poetry/core/_vendor/pyrsistent/_pvector.py b/src/poetry/core/_vendor/pyrsistent/_pvector.py new file mode 100644 index 0000000..2aff0e8 --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/_pvector.py @@ -0,0 +1,711 @@ +from abc import abstractmethod, ABCMeta +from collections.abc import Sequence, Hashable +from numbers import Integral +import operator +from pyrsistent._transformations import transform + + +def _bitcount(val): + return bin(val).count("1") + +BRANCH_FACTOR = 32 +BIT_MASK = BRANCH_FACTOR - 1 +SHIFT = _bitcount(BIT_MASK) + + +def compare_pvector(v, other, operator): + return operator(v.tolist(), other.tolist() if isinstance(other, PVector) else other) + + +def _index_or_slice(index, stop): + if stop is None: + return index + + return slice(index, stop) + + +class PythonPVector(object): + """ + Support structure for PVector that implements structural sharing for vectors using a trie. + """ + __slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '__weakref__') + + def __new__(cls, count, shift, root, tail): + self = super(PythonPVector, cls).__new__(cls) + self._count = count + self._shift = shift + self._root = root + self._tail = tail + + # Derived attribute stored for performance + self._tail_offset = self._count - len(self._tail) + return self + + def __len__(self): + return self._count + + def __getitem__(self, index): + if isinstance(index, slice): + # There are more conditions than the below where it would be OK to + # return ourselves, implement those... 
+ if index.start is None and index.stop is None and index.step is None: + return self + + # This is a bit nasty realizing the whole structure as a list before + # slicing it but it is the fastest way I've found to date, and it's easy :-) + return _EMPTY_PVECTOR.extend(self.tolist()[index]) + + if index < 0: + index += self._count + + return PythonPVector._node_for(self, index)[index & BIT_MASK] + + def __add__(self, other): + return self.extend(other) + + def __repr__(self): + return 'pvector({0})'.format(str(self.tolist())) + + def __str__(self): + return self.__repr__() + + def __iter__(self): + # This is kind of lazy and will produce some memory overhead but it is the fasted method + # by far of those tried since it uses the speed of the built in python list directly. + return iter(self.tolist()) + + def __ne__(self, other): + return not self.__eq__(other) + + def __eq__(self, other): + return self is other or (hasattr(other, '__len__') and self._count == len(other)) and compare_pvector(self, other, operator.eq) + + def __gt__(self, other): + return compare_pvector(self, other, operator.gt) + + def __lt__(self, other): + return compare_pvector(self, other, operator.lt) + + def __ge__(self, other): + return compare_pvector(self, other, operator.ge) + + def __le__(self, other): + return compare_pvector(self, other, operator.le) + + def __mul__(self, times): + if times <= 0 or self is _EMPTY_PVECTOR: + return _EMPTY_PVECTOR + + if times == 1: + return self + + return _EMPTY_PVECTOR.extend(times * self.tolist()) + + __rmul__ = __mul__ + + def _fill_list(self, node, shift, the_list): + if shift: + shift -= SHIFT + for n in node: + self._fill_list(n, shift, the_list) + else: + the_list.extend(node) + + def tolist(self): + """ + The fastest way to convert the vector into a python list. + """ + the_list = [] + self._fill_list(self._root, self._shift, the_list) + the_list.extend(self._tail) + return the_list + + def _totuple(self): + """ + Returns the content as a python tuple. + """ + return tuple(self.tolist()) + + def __hash__(self): + # Taking the easy way out again... 
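+ # Hashing realizes the whole vector as a tuple, so it is O(n) and the result is + # not cached here (unlike PMap, which memoizes its hash in _cached_hash).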
+ return hash(self._totuple()) + + def transform(self, *transformations): + return transform(self, transformations) + + def __reduce__(self): + # Pickling support + return pvector, (self.tolist(),) + + def mset(self, *args): + if len(args) % 2: + raise TypeError("mset expected an even number of arguments") + + evolver = self.evolver() + for i in range(0, len(args), 2): + evolver[args[i]] = args[i+1] + + return evolver.persistent() + + class Evolver(object): + __slots__ = ('_count', '_shift', '_root', '_tail', '_tail_offset', '_dirty_nodes', + '_extra_tail', '_cached_leafs', '_orig_pvector') + + def __init__(self, v): + self._reset(v) + + def __getitem__(self, index): + if not isinstance(index, Integral): + raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__) + + if index < 0: + index += self._count + len(self._extra_tail) + + if self._count <= index < self._count + len(self._extra_tail): + return self._extra_tail[index - self._count] + + return PythonPVector._node_for(self, index)[index & BIT_MASK] + + def _reset(self, v): + self._count = v._count + self._shift = v._shift + self._root = v._root + self._tail = v._tail + self._tail_offset = v._tail_offset + self._dirty_nodes = {} + self._cached_leafs = {} + self._extra_tail = [] + self._orig_pvector = v + + def append(self, element): + self._extra_tail.append(element) + return self + + def extend(self, iterable): + self._extra_tail.extend(iterable) + return self + + def set(self, index, val): + self[index] = val + return self + + def __setitem__(self, index, val): + if not isinstance(index, Integral): + raise TypeError("'%s' object cannot be interpreted as an index" % type(index).__name__) + + if index < 0: + index += self._count + len(self._extra_tail) + + if 0 <= index < self._count: + node = self._cached_leafs.get(index >> SHIFT) + if node: + node[index & BIT_MASK] = val + elif index >= self._tail_offset: + if id(self._tail) not in self._dirty_nodes: + self._tail = list(self._tail) + self._dirty_nodes[id(self._tail)] = True + self._cached_leafs[index >> SHIFT] = self._tail + self._tail[index & BIT_MASK] = val + else: + self._root = self._do_set(self._shift, self._root, index, val) + elif self._count <= index < self._count + len(self._extra_tail): + self._extra_tail[index - self._count] = val + elif index == self._count + len(self._extra_tail): + self._extra_tail.append(val) + else: + raise IndexError("Index out of range: %s" % (index,)) + + def _do_set(self, level, node, i, val): + if id(node) in self._dirty_nodes: + ret = node + else: + ret = list(node) + self._dirty_nodes[id(ret)] = True + + if level == 0: + ret[i & BIT_MASK] = val + self._cached_leafs[i >> SHIFT] = ret + else: + sub_index = (i >> level) & BIT_MASK # >>> + ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val) + + return ret + + def delete(self, index): + del self[index] + return self + + def __delitem__(self, key): + if self._orig_pvector: + # All structural sharing bets are off, base evolver on _extra_tail only + l = PythonPVector(self._count, self._shift, self._root, self._tail).tolist() + l.extend(self._extra_tail) + self._reset(_EMPTY_PVECTOR) + self._extra_tail = l + + del self._extra_tail[key] + + def persistent(self): + result = self._orig_pvector + if self.is_dirty(): + result = PythonPVector(self._count, self._shift, self._root, self._tail).extend(self._extra_tail) + self._reset(result) + + return result + + def __len__(self): + return self._count + len(self._extra_tail) + + def is_dirty(self): + return 
bool(self._dirty_nodes or self._extra_tail) + + def evolver(self): + return PythonPVector.Evolver(self) + + def set(self, i, val): + # This method could be implemented by a call to mset() but doing so would cause + # a ~5 X performance penalty on PyPy (considered the primary platform for this implementation + # of PVector) so we're keeping this implementation for now. + + if not isinstance(i, Integral): + raise TypeError("'%s' object cannot be interpreted as an index" % type(i).__name__) + + if i < 0: + i += self._count + + if 0 <= i < self._count: + if i >= self._tail_offset: + new_tail = list(self._tail) + new_tail[i & BIT_MASK] = val + return PythonPVector(self._count, self._shift, self._root, new_tail) + + return PythonPVector(self._count, self._shift, self._do_set(self._shift, self._root, i, val), self._tail) + + if i == self._count: + return self.append(val) + + raise IndexError("Index out of range: %s" % (i,)) + + def _do_set(self, level, node, i, val): + ret = list(node) + if level == 0: + ret[i & BIT_MASK] = val + else: + sub_index = (i >> level) & BIT_MASK # >>> + ret[sub_index] = self._do_set(level - SHIFT, node[sub_index], i, val) + + return ret + + @staticmethod + def _node_for(pvector_like, i): + if 0 <= i < pvector_like._count: + if i >= pvector_like._tail_offset: + return pvector_like._tail + + node = pvector_like._root + for level in range(pvector_like._shift, 0, -SHIFT): + node = node[(i >> level) & BIT_MASK] # >>> + + return node + + raise IndexError("Index out of range: %s" % (i,)) + + def _create_new_root(self): + new_shift = self._shift + + # Overflow root? + if (self._count >> SHIFT) > (1 << self._shift): # >>> + new_root = [self._root, self._new_path(self._shift, self._tail)] + new_shift += SHIFT + else: + new_root = self._push_tail(self._shift, self._root, self._tail) + + return new_root, new_shift + + def append(self, val): + if len(self._tail) < BRANCH_FACTOR: + new_tail = list(self._tail) + new_tail.append(val) + return PythonPVector(self._count + 1, self._shift, self._root, new_tail) + + # Full tail, push into tree + new_root, new_shift = self._create_new_root() + return PythonPVector(self._count + 1, new_shift, new_root, [val]) + + def _new_path(self, level, node): + if level == 0: + return node + + return [self._new_path(level - SHIFT, node)] + + def _mutating_insert_tail(self): + self._root, self._shift = self._create_new_root() + self._tail = [] + + def _mutating_fill_tail(self, offset, sequence): + max_delta_len = BRANCH_FACTOR - len(self._tail) + delta = sequence[offset:offset + max_delta_len] + self._tail.extend(delta) + delta_len = len(delta) + self._count += delta_len + return offset + delta_len + + def _mutating_extend(self, sequence): + offset = 0 + sequence_len = len(sequence) + while offset < sequence_len: + offset = self._mutating_fill_tail(offset, sequence) + if len(self._tail) == BRANCH_FACTOR: + self._mutating_insert_tail() + + self._tail_offset = self._count - len(self._tail) + + def extend(self, obj): + # Mutates the new vector directly for efficiency but that's only an + # implementation detail, once it is returned it should be considered immutable + l = obj.tolist() if isinstance(obj, PythonPVector) else list(obj) + if l: + new_vector = self.append(l[0]) + new_vector._mutating_extend(l[1:]) + return new_vector + + return self + + def _push_tail(self, level, parent, tail_node): + """ + if parent is leaf, insert node, + else does it map to an existing child? 
-> + node_to_insert = push node one more level + else alloc new path + + return node_to_insert placed in copy of parent + """ + ret = list(parent) + + if level == SHIFT: + ret.append(tail_node) + return ret + + sub_index = ((self._count - 1) >> level) & BIT_MASK # >>> + if len(parent) > sub_index: + ret[sub_index] = self._push_tail(level - SHIFT, parent[sub_index], tail_node) + return ret + + ret.append(self._new_path(level - SHIFT, tail_node)) + return ret + + def index(self, value, *args, **kwargs): + return self.tolist().index(value, *args, **kwargs) + + def count(self, value): + return self.tolist().count(value) + + def delete(self, index, stop=None): + l = self.tolist() + del l[_index_or_slice(index, stop)] + return _EMPTY_PVECTOR.extend(l) + + def remove(self, value): + l = self.tolist() + l.remove(value) + return _EMPTY_PVECTOR.extend(l) + +class PVector(metaclass=ABCMeta): + """ + Persistent vector implementation. Meant as a replacement for the cases where you would normally + use a Python list. + + Do not instantiate directly, instead use the factory functions :py:func:`v` and :py:func:`pvector` to + create an instance. + + Heavily influenced by the persistent vector available in Clojure. Initially this was more or + less just a port of the Java code for the Clojure vector. It has since been modified and to + some extent optimized for usage in Python. + + The vector is organized as a trie, any mutating method will return a new vector that contains the changes. No + updates are done to the original vector. Structural sharing between vectors are applied where possible to save + space and to avoid making complete copies. + + This structure corresponds most closely to the built in list type and is intended as a replacement. Where the + semantics are the same (more or less) the same function names have been used but for some cases it is not possible, + for example assignments. + + The PVector implements the Sequence protocol and is Hashable. + + Inserts are amortized O(1). Random access is log32(n) where n is the size of the vector. + + The following are examples of some common operations on persistent vectors: + + >>> p = v(1, 2, 3) + >>> p2 = p.append(4) + >>> p3 = p2.extend([5, 6, 7]) + >>> p + pvector([1, 2, 3]) + >>> p2 + pvector([1, 2, 3, 4]) + >>> p3 + pvector([1, 2, 3, 4, 5, 6, 7]) + >>> p3[5] + 6 + >>> p.set(1, 99) + pvector([1, 99, 3]) + >>> + """ + + @abstractmethod + def __len__(self): + """ + >>> len(v(1, 2, 3)) + 3 + """ + + @abstractmethod + def __getitem__(self, index): + """ + Get value at index. Full slicing support. + + >>> v1 = v(5, 6, 7, 8) + >>> v1[2] + 7 + >>> v1[1:3] + pvector([6, 7]) + """ + + @abstractmethod + def __add__(self, other): + """ + >>> v1 = v(1, 2) + >>> v2 = v(3, 4) + >>> v1 + v2 + pvector([1, 2, 3, 4]) + """ + + @abstractmethod + def __mul__(self, times): + """ + >>> v1 = v(1, 2) + >>> 3 * v1 + pvector([1, 2, 1, 2, 1, 2]) + """ + + @abstractmethod + def __hash__(self): + """ + >>> v1 = v(1, 2, 3) + >>> v2 = v(1, 2, 3) + >>> hash(v1) == hash(v2) + True + """ + + @abstractmethod + def evolver(self): + """ + Create a new evolver for this pvector. The evolver acts as a mutable view of the vector + with "transaction like" semantics. No part of the underlying vector i updated, it is still + fully immutable. Furthermore multiple evolvers created from the same pvector do not + interfere with each other. 
+ + You may want to use an evolver instead of working directly with the pvector in the + following cases: + + * Multiple updates are done to the same vector and the intermediate results are of no + interest. In this case using an evolver may be a more efficient and easier to work with. + * You need to pass a vector into a legacy function or a function that you have no control + over which performs in place mutations of lists. In this case pass an evolver instance + instead and then create a new pvector from the evolver once the function returns. + + The following example illustrates a typical workflow when working with evolvers. It also + displays most of the API (which i kept small by design, you should not be tempted to + use evolvers in excess ;-)). + + Create the evolver and perform various mutating updates to it: + + >>> v1 = v(1, 2, 3, 4, 5) + >>> e = v1.evolver() + >>> e[1] = 22 + >>> _ = e.append(6) + >>> _ = e.extend([7, 8, 9]) + >>> e[8] += 1 + >>> len(e) + 9 + + The underlying pvector remains the same: + + >>> v1 + pvector([1, 2, 3, 4, 5]) + + The changes are kept in the evolver. An updated pvector can be created using the + persistent() function on the evolver. + + >>> v2 = e.persistent() + >>> v2 + pvector([1, 22, 3, 4, 5, 6, 7, 8, 10]) + + The new pvector will share data with the original pvector in the same way that would have + been done if only using operations on the pvector. + """ + + @abstractmethod + def mset(self, *args): + """ + Return a new vector with elements in specified positions replaced by values (multi set). + + Elements on even positions in the argument list are interpreted as indexes while + elements on odd positions are considered values. + + >>> v1 = v(1, 2, 3) + >>> v1.mset(0, 11, 2, 33) + pvector([11, 2, 33]) + """ + + @abstractmethod + def set(self, i, val): + """ + Return a new vector with element at position i replaced with val. The original vector remains unchanged. + + Setting a value one step beyond the end of the vector is equal to appending. Setting beyond that will + result in an IndexError. + + >>> v1 = v(1, 2, 3) + >>> v1.set(1, 4) + pvector([1, 4, 3]) + >>> v1.set(3, 4) + pvector([1, 2, 3, 4]) + >>> v1.set(-1, 4) + pvector([1, 2, 4]) + """ + + @abstractmethod + def append(self, val): + """ + Return a new vector with val appended. + + >>> v1 = v(1, 2) + >>> v1.append(3) + pvector([1, 2, 3]) + """ + + @abstractmethod + def extend(self, obj): + """ + Return a new vector with all values in obj appended to it. Obj may be another + PVector or any other Iterable. + + >>> v1 = v(1, 2, 3) + >>> v1.extend([4, 5]) + pvector([1, 2, 3, 4, 5]) + """ + + @abstractmethod + def index(self, value, *args, **kwargs): + """ + Return first index of value. Additional indexes may be supplied to limit the search to a + sub range of the vector. + + >>> v1 = v(1, 2, 3, 4, 3) + >>> v1.index(3) + 2 + >>> v1.index(3, 3, 5) + 4 + """ + + @abstractmethod + def count(self, value): + """ + Return the number of times that value appears in the vector. + + >>> v1 = v(1, 4, 3, 4) + >>> v1.count(4) + 2 + """ + + @abstractmethod + def transform(self, *transformations): + """ + Transform arbitrarily complex combinations of PVectors and PMaps. A transformation + consists of two parts. One match expression that specifies which elements to transform + and one transformation function that performs the actual transformation. + + >>> from pyrsistent import freeze, ny + >>> news_paper = freeze({'articles': [{'author': 'Sara', 'content': 'A short article'}, + ... 
{'author': 'Steve', 'content': 'A slightly longer article'}], + ... 'weather': {'temperature': '11C', 'wind': '5m/s'}}) + >>> short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:25] + '...' if len(c) > 25 else c) + >>> very_short_news = news_paper.transform(['articles', ny, 'content'], lambda c: c[:15] + '...' if len(c) > 15 else c) + >>> very_short_news.articles[0].content + 'A short article' + >>> very_short_news.articles[1].content + 'A slightly long...' + + When nothing has been transformed the original data structure is kept + + >>> short_news is news_paper + True + >>> very_short_news is news_paper + False + >>> very_short_news.articles[0] is news_paper.articles[0] + True + """ + + @abstractmethod + def delete(self, index, stop=None): + """ + Delete a portion of the vector by index or range. + + >>> v1 = v(1, 2, 3, 4, 5) + >>> v1.delete(1) + pvector([1, 3, 4, 5]) + >>> v1.delete(1, 3) + pvector([1, 4, 5]) + """ + + @abstractmethod + def remove(self, value): + """ + Remove the first occurrence of a value from the vector. + + >>> v1 = v(1, 2, 3, 2, 1) + >>> v2 = v1.remove(1) + >>> v2 + pvector([2, 3, 2, 1]) + >>> v2.remove(1) + pvector([2, 3, 2]) + """ + + +_EMPTY_PVECTOR = PythonPVector(0, SHIFT, [], []) +PVector.register(PythonPVector) +Sequence.register(PVector) +Hashable.register(PVector) + +def python_pvector(iterable=()): + """ + Create a new persistent vector containing the elements in iterable. + + >>> v1 = pvector([1, 2, 3]) + >>> v1 + pvector([1, 2, 3]) + """ + return _EMPTY_PVECTOR.extend(iterable) + +try: + # Use the C extension as underlying trie implementation if it is available + import os + if os.environ.get('PYRSISTENT_NO_C_EXTENSION'): + pvector = python_pvector + else: + from pvectorc import pvector + PVector.register(type(pvector())) +except ImportError: + pvector = python_pvector + + +def v(*elements): + """ + Create a new persistent vector containing all parameters to this function. + + >>> v1 = v(1, 2, 3) + >>> v1 + pvector([1, 2, 3]) + """ + return pvector(elements) diff --git a/src/poetry/core/_vendor/pyrsistent/_toolz.py b/src/poetry/core/_vendor/pyrsistent/_toolz.py new file mode 100644 index 0000000..0bf2cb1 --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/_toolz.py @@ -0,0 +1,83 @@ +""" +Functionality copied from the toolz package to avoid having +to add toolz as a dependency. + +See https://github.com/pytoolz/toolz/. + +toolz is released under BSD licence. Below is the licence text +from toolz as it appeared when copying the code. + +-------------------------------------------------------------- + +Copyright (c) 2013 Matthew Rocklin + +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + a. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + b. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + c. Neither the name of toolz nor the names of its contributors + may be used to endorse or promote products derived from this software + without specific prior written permission. 
+ + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY +OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. +""" +import operator +from functools import reduce + + +def get_in(keys, coll, default=None, no_default=False): + """ + NB: This is a straight copy of the get_in implementation found in + the toolz library (https://github.com/pytoolz/toolz/). It works + with persistent data structures as well as the corresponding + datastructures from the stdlib. + + Returns coll[i0][i1]...[iX] where [i0, i1, ..., iX]==keys. + + If coll[i0][i1]...[iX] cannot be found, returns ``default``, unless + ``no_default`` is specified, then it raises KeyError or IndexError. + + ``get_in`` is a generalization of ``operator.getitem`` for nested data + structures such as dictionaries and lists. + >>> from pyrsistent import freeze + >>> transaction = freeze({'name': 'Alice', + ... 'purchase': {'items': ['Apple', 'Orange'], + ... 'costs': [0.50, 1.25]}, + ... 'credit card': '5555-1234-1234-1234'}) + >>> get_in(['purchase', 'items', 0], transaction) + 'Apple' + >>> get_in(['name'], transaction) + 'Alice' + >>> get_in(['purchase', 'total'], transaction) + >>> get_in(['purchase', 'items', 'apple'], transaction) + >>> get_in(['purchase', 'items', 10], transaction) + >>> get_in(['purchase', 'total'], transaction, 0) + 0 + >>> get_in(['y'], {}, no_default=True) + Traceback (most recent call last): + ... 
+ KeyError: 'y' + """ + try: + return reduce(operator.getitem, keys, coll) + except (KeyError, IndexError, TypeError): + if no_default: + raise + return default diff --git a/src/poetry/core/_vendor/pyrsistent/_transformations.py b/src/poetry/core/_vendor/pyrsistent/_transformations.py new file mode 100644 index 0000000..7544843 --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/_transformations.py @@ -0,0 +1,139 @@ +import re +try: + from inspect import Parameter, signature +except ImportError: + signature = None + from inspect import getfullargspec + + +_EMPTY_SENTINEL = object() + + +def inc(x): + """ Add one to the current value """ + return x + 1 + + +def dec(x): + """ Subtract one from the current value """ + return x - 1 + + +def discard(evolver, key): + """ Discard the element and returns a structure without the discarded elements """ + try: + del evolver[key] + except KeyError: + pass + + +# Matchers +def rex(expr): + """ Regular expression matcher to use together with transform functions """ + r = re.compile(expr) + return lambda key: isinstance(key, str) and r.match(key) + + +def ny(_): + """ Matcher that matches any value """ + return True + + +# Support functions +def _chunks(l, n): + for i in range(0, len(l), n): + yield l[i:i + n] + + +def transform(structure, transformations): + r = structure + for path, command in _chunks(transformations, 2): + r = _do_to_path(r, path, command) + return r + + +def _do_to_path(structure, path, command): + if not path: + return command(structure) if callable(command) else command + + kvs = _get_keys_and_values(structure, path[0]) + return _update_structure(structure, kvs, path[1:], command) + + +def _items(structure): + try: + return structure.items() + except AttributeError: + # Support wider range of structures by adding a transform_items() or similar? + return list(enumerate(structure)) + + +def _get(structure, key, default): + try: + if hasattr(structure, '__getitem__'): + return structure[key] + + return getattr(structure, key) + + except (IndexError, KeyError): + return default + + +def _get_keys_and_values(structure, key_spec): + if callable(key_spec): + # Support predicates as callable objects in the path + arity = _get_arity(key_spec) + if arity == 1: + # Unary predicates are called with the "key" of the path + # - eg a key in a mapping, an index in a sequence. + return [(k, v) for k, v in _items(structure) if key_spec(k)] + elif arity == 2: + # Binary predicates are called with the key and the corresponding + # value. + return [(k, v) for k, v in _items(structure) if key_spec(k, v)] + else: + # Other arities are an error. + raise ValueError( + "callable in transform path must take 1 or 2 arguments" + ) + + # Non-callables are used as-is as a key. + return [(key_spec, _get(structure, key_spec, _EMPTY_SENTINEL))] + + +if signature is None: + def _get_arity(f): + argspec = getfullargspec(f) + return len(argspec.args) - len(argspec.defaults or ()) +else: + def _get_arity(f): + return sum( + 1 + for p + in signature(f).parameters.values() + if p.default is Parameter.empty + and p.kind in (Parameter.POSITIONAL_ONLY, Parameter.POSITIONAL_OR_KEYWORD) + ) + + +def _update_structure(structure, kvs, path, command): + from pyrsistent._pmap import pmap + e = structure.evolver() + if not path and command is discard: + # Do this in reverse to avoid index problems with vectors. See #92. 
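+ # Deleting in ascending order would shift the indices of later elements, so the + # keys/indices in kvs are processed back to front to keep each position valid.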
+ for k, v in reversed(kvs): + discard(e, k) + else: + for k, v in kvs: + is_empty = False + if v is _EMPTY_SENTINEL: + # Allow expansion of structure but make sure to cover the case + # when an empty pmap is added as leaf node. See #154. + is_empty = True + v = pmap() + + result = _do_to_path(v, path, command) + if result is not v or is_empty: + e[k] = result + + return e.persistent() diff --git a/src/poetry/core/_vendor/pyrsistent/py.typed b/src/poetry/core/_vendor/pyrsistent/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/_vendor/pyrsistent/typing.py b/src/poetry/core/_vendor/pyrsistent/typing.py new file mode 100644 index 0000000..6a86c83 --- /dev/null +++ b/src/poetry/core/_vendor/pyrsistent/typing.py @@ -0,0 +1,80 @@ +"""Helpers for use with type annotation. + +Use the empty classes in this module when annotating the types of Pyrsistent +objects, instead of using the actual collection class. + +For example, + + from pyrsistent import pvector + from pyrsistent.typing import PVector + + myvector: PVector[str] = pvector(['a', 'b', 'c']) + +""" +from __future__ import absolute_import + +try: + from typing import Container + from typing import Hashable + from typing import Generic + from typing import Iterable + from typing import Mapping + from typing import Sequence + from typing import Sized + from typing import TypeVar + + __all__ = [ + 'CheckedPMap', + 'CheckedPSet', + 'CheckedPVector', + 'PBag', + 'PDeque', + 'PList', + 'PMap', + 'PSet', + 'PVector', + ] + + T = TypeVar('T') + KT = TypeVar('KT') + VT = TypeVar('VT') + + class CheckedPMap(Mapping[KT, VT], Hashable): + pass + + # PSet.add and PSet.discard have different type signatures than that of Set. + class CheckedPSet(Generic[T], Hashable): + pass + + class CheckedPVector(Sequence[T], Hashable): + pass + + class PBag(Container[T], Iterable[T], Sized, Hashable): + pass + + class PDeque(Sequence[T], Hashable): + pass + + class PList(Sequence[T], Hashable): + pass + + class PMap(Mapping[KT, VT], Hashable): + pass + + # PSet.add and PSet.discard have different type signatures than that of Set. + class PSet(Generic[T], Hashable): + pass + + class PVector(Sequence[T], Hashable): + pass + + class PVectorEvolver(Generic[T]): + pass + + class PMapEvolver(Generic[KT, VT]): + pass + + class PSetEvolver(Generic[T]): + pass +except ImportError: + pass diff --git a/src/poetry/core/_vendor/tomlkit/LICENSE b/src/poetry/core/_vendor/tomlkit/LICENSE new file mode 100644 index 0000000..44cf2b3 --- /dev/null +++ b/src/poetry/core/_vendor/tomlkit/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2018 Sébastien Eustace + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/src/poetry/core/_vendor/tomlkit/__init__.py b/src/poetry/core/_vendor/tomlkit/__init__.py new file mode 100644 index 0000000..584bd96 --- /dev/null +++ b/src/poetry/core/_vendor/tomlkit/__init__.py @@ -0,0 +1,55 @@ +from tomlkit.api import TOMLDocument +from tomlkit.api import aot +from tomlkit.api import array +from tomlkit.api import boolean +from tomlkit.api import comment +from tomlkit.api import date +from tomlkit.api import datetime +from tomlkit.api import document +from tomlkit.api import dump +from tomlkit.api import dumps +from tomlkit.api import float_ +from tomlkit.api import inline_table +from tomlkit.api import integer +from tomlkit.api import item +from tomlkit.api import key +from tomlkit.api import key_value +from tomlkit.api import load +from tomlkit.api import loads +from tomlkit.api import nl +from tomlkit.api import parse +from tomlkit.api import string +from tomlkit.api import table +from tomlkit.api import time +from tomlkit.api import value +from tomlkit.api import ws + + +__version__ = "0.11.6" +__all__ = [ + "aot", + "array", + "boolean", + "comment", + "date", + "datetime", + "document", + "dump", + "dumps", + "float_", + "inline_table", + "integer", + "item", + "key", + "key_value", + "load", + "loads", + "nl", + "parse", + "string", + "table", + "time", + "TOMLDocument", + "value", + "ws", +] diff --git a/src/poetry/core/_vendor/tomlkit/_compat.py b/src/poetry/core/_vendor/tomlkit/_compat.py new file mode 100644 index 0000000..f1d3bcc --- /dev/null +++ b/src/poetry/core/_vendor/tomlkit/_compat.py @@ -0,0 +1,22 @@ +import contextlib +import sys + +from typing import Any +from typing import List +from typing import Optional + + +PY38 = sys.version_info >= (3, 8) + + +def decode(string: Any, encodings: Optional[List[str]] = None): + if not isinstance(string, bytes): + return string + + encodings = encodings or ["utf-8", "latin1", "ascii"] + + for encoding in encodings: + with contextlib.suppress(UnicodeEncodeError, UnicodeDecodeError): + return string.decode(encoding) + + return string.decode(encodings[0], errors="ignore") diff --git a/src/poetry/core/_vendor/tomlkit/_utils.py b/src/poetry/core/_vendor/tomlkit/_utils.py new file mode 100644 index 0000000..85958e9 --- /dev/null +++ b/src/poetry/core/_vendor/tomlkit/_utils.py @@ -0,0 +1,155 @@ +import re + +from collections.abc import Mapping +from datetime import date +from datetime import datetime +from datetime import time +from datetime import timedelta +from datetime import timezone +from typing import Collection +from typing import Union + +from tomlkit._compat import decode + + +RFC_3339_LOOSE = re.compile( + "^" + r"(([0-9]+)-(\d{2})-(\d{2}))?" # Date + "(" + "([Tt ])?" # Separator + r"(\d{2}):(\d{2}):(\d{2})(\.([0-9]+))?" # Time + r"(([Zz])|([\+|\-]([01][0-9]|2[0-3]):([0-5][0-9])))?" # Timezone + ")?" + "$" +) + +RFC_3339_DATETIME = re.compile( + "^" + "([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])" # Date + "[Tt ]" # Separator + r"([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.([0-9]+))?" # Time + r"(([Zz])|([\+|\-]([01][0-9]|2[0-3]):([0-5][0-9])))?" 
# Timezone + "$" +) + +RFC_3339_DATE = re.compile("^([0-9]+)-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])$") + +RFC_3339_TIME = re.compile( + r"^([01][0-9]|2[0-3]):([0-5][0-9]):([0-5][0-9]|60)(\.([0-9]+))?$" +) + +_utc = timezone(timedelta(), "UTC") + + +def parse_rfc3339(string: str) -> Union[datetime, date, time]: + m = RFC_3339_DATETIME.match(string) + if m: + year = int(m.group(1)) + month = int(m.group(2)) + day = int(m.group(3)) + hour = int(m.group(4)) + minute = int(m.group(5)) + second = int(m.group(6)) + microsecond = 0 + + if m.group(7): + microsecond = int((f"{m.group(8):<06s}")[:6]) + + if m.group(9): + # Timezone + tz = m.group(9) + if tz.upper() == "Z": + tzinfo = _utc + else: + sign = m.group(11)[0] + hour_offset, minute_offset = int(m.group(12)), int(m.group(13)) + offset = timedelta(seconds=hour_offset * 3600 + minute_offset * 60) + if sign == "-": + offset = -offset + + tzinfo = timezone(offset, f"{sign}{m.group(12)}:{m.group(13)}") + + return datetime( + year, month, day, hour, minute, second, microsecond, tzinfo=tzinfo + ) + else: + return datetime(year, month, day, hour, minute, second, microsecond) + + m = RFC_3339_DATE.match(string) + if m: + year = int(m.group(1)) + month = int(m.group(2)) + day = int(m.group(3)) + + return date(year, month, day) + + m = RFC_3339_TIME.match(string) + if m: + hour = int(m.group(1)) + minute = int(m.group(2)) + second = int(m.group(3)) + microsecond = 0 + + if m.group(4): + microsecond = int((f"{m.group(5):<06s}")[:6]) + + return time(hour, minute, second, microsecond) + + raise ValueError("Invalid RFC 339 string") + + +# https://toml.io/en/v1.0.0#string +CONTROL_CHARS = frozenset(chr(c) for c in range(0x20)) | {chr(0x7F)} +_escaped = { + "b": "\b", + "t": "\t", + "n": "\n", + "f": "\f", + "r": "\r", + '"': '"', + "\\": "\\", +} +_compact_escapes = { + **{v: f"\\{k}" for k, v in _escaped.items()}, + '"""': '""\\"', +} +_basic_escapes = CONTROL_CHARS | {'"', "\\"} + + +def _unicode_escape(seq: str) -> str: + return "".join(f"\\u{ord(c):04x}" for c in seq) + + +def escape_string(s: str, escape_sequences: Collection[str] = _basic_escapes) -> str: + s = decode(s) + + res = [] + start = 0 + + def flush(inc=1): + if start != i: + res.append(s[start:i]) + + return i + inc + + i = 0 + while i < len(s): + for seq in escape_sequences: + seq_len = len(seq) + if s[i:].startswith(seq): + start = flush(seq_len) + res.append(_compact_escapes.get(seq) or _unicode_escape(seq)) + i += seq_len - 1 # fast-forward escape sequence + i += 1 + + flush() + + return "".join(res) + + +def merge_dicts(d1: dict, d2: dict) -> dict: + for k, v in d2.items(): + if k in d1 and isinstance(d1[k], dict) and isinstance(v, Mapping): + merge_dicts(d1[k], v) + else: + d1[k] = d2[k] diff --git a/src/poetry/core/_vendor/tomlkit/api.py b/src/poetry/core/_vendor/tomlkit/api.py new file mode 100644 index 0000000..ed48ca9 --- /dev/null +++ b/src/poetry/core/_vendor/tomlkit/api.py @@ -0,0 +1,287 @@ +import datetime as _datetime + +from collections.abc import Mapping +from typing import IO +from typing import Iterable +from typing import Optional +from typing import Tuple +from typing import Union + +from tomlkit._utils import parse_rfc3339 +from tomlkit.container import Container +from tomlkit.exceptions import UnexpectedCharError +from tomlkit.items import AoT +from tomlkit.items import Array +from tomlkit.items import Bool +from tomlkit.items import Comment +from tomlkit.items import Date +from tomlkit.items import DateTime +from tomlkit.items import DottedKey +from tomlkit.items 
import Float +from tomlkit.items import InlineTable +from tomlkit.items import Integer +from tomlkit.items import Item as _Item +from tomlkit.items import Key +from tomlkit.items import SingleKey +from tomlkit.items import String +from tomlkit.items import StringType as _StringType +from tomlkit.items import Table +from tomlkit.items import Time +from tomlkit.items import Trivia +from tomlkit.items import Whitespace +from tomlkit.items import item +from tomlkit.parser import Parser +from tomlkit.toml_document import TOMLDocument + + +def loads(string: Union[str, bytes]) -> TOMLDocument: + """ + Parses a string into a TOMLDocument. + + Alias for parse(). + """ + return parse(string) + + +def dumps(data: Mapping, sort_keys: bool = False) -> str: + """ + Dumps a TOMLDocument into a string. + """ + if not isinstance(data, Container) and isinstance(data, Mapping): + data = item(dict(data), _sort_keys=sort_keys) + + try: + # data should be a `Container` (and therefore implement `as_string`) + # for all type safe invocations of this function + return data.as_string() # type: ignore[attr-defined] + except AttributeError as ex: + msg = f"Expecting Mapping or TOML Container, {type(data)} given" + raise TypeError(msg) from ex + + +def load(fp: IO) -> TOMLDocument: + """ + Load toml document from a file-like object. + """ + return parse(fp.read()) + + +def dump(data: Mapping, fp: IO[str], *, sort_keys: bool = False) -> None: + """ + Dump a TOMLDocument into a writable file stream. + + :param data: a dict-like object to dump + :param sort_keys: if true, sort the keys in alphabetic order + """ + fp.write(dumps(data, sort_keys=sort_keys)) + + +def parse(string: Union[str, bytes]) -> TOMLDocument: + """ + Parses a string or bytes into a TOMLDocument. + """ + return Parser(string).parse() + + +def document() -> TOMLDocument: + """ + Returns a new TOMLDocument instance. + """ + return TOMLDocument() + + +# Items +def integer(raw: Union[str, int]) -> Integer: + """Create an integer item from a number or string.""" + return item(int(raw)) + + +def float_(raw: Union[str, float]) -> Float: + """Create an float item from a number or string.""" + return item(float(raw)) + + +def boolean(raw: str) -> Bool: + """Turn `true` or `false` into a boolean item.""" + return item(raw == "true") + + +def string( + raw: str, + *, + literal: bool = False, + multiline: bool = False, + escape: bool = True, +) -> String: + """Create a string item. + + By default, this function will create *single line basic* strings, but + boolean flags (e.g. ``literal=True`` and/or ``multiline=True``) + can be used for personalization. + + For more information, please check the spec: `https://toml.io/en/v1.0.0#string`_. + + Common escaping rules will be applied for basic strings. + This can be controlled by explicitly setting ``escape=False``. + Please note that, if you disable escaping, you will have to make sure that + the given strings don't contain any forbidden character or sequence. 
+ """ + type_ = _StringType.select(literal, multiline) + return String.from_raw(raw, type_, escape) + + +def date(raw: str) -> Date: + """Create a TOML date.""" + value = parse_rfc3339(raw) + if not isinstance(value, _datetime.date): + raise ValueError("date() only accepts date strings.") + + return item(value) + + +def time(raw: str) -> Time: + """Create a TOML time.""" + value = parse_rfc3339(raw) + if not isinstance(value, _datetime.time): + raise ValueError("time() only accepts time strings.") + + return item(value) + + +def datetime(raw: str) -> DateTime: + """Create a TOML datetime.""" + value = parse_rfc3339(raw) + if not isinstance(value, _datetime.datetime): + raise ValueError("datetime() only accepts datetime strings.") + + return item(value) + + +def array(raw: str = None) -> Array: + """Create an array item for its string representation. + + :Example: + + >>> array("[1, 2, 3]") # Create from a string + [1, 2, 3] + >>> a = array() + >>> a.extend([1, 2, 3]) # Create from a list + >>> a + [1, 2, 3] + """ + if raw is None: + raw = "[]" + + return value(raw) + + +def table(is_super_table: Optional[bool] = None) -> Table: + """Create an empty table. + + :param is_super_table: if true, the table is a super table + + :Example: + + >>> doc = document() + >>> foo = table(True) + >>> bar = table() + >>> bar.update({'x': 1}) + >>> foo.append('bar', bar) + >>> doc.append('foo', foo) + >>> print(doc.as_string()) + [foo.bar] + x = 1 + """ + return Table(Container(), Trivia(), False, is_super_table) + + +def inline_table() -> InlineTable: + """Create an inline table. + + :Example: + + >>> table = inline_table() + >>> table.update({'x': 1, 'y': 2}) + >>> print(table.as_string()) + {x = 1, y = 2} + """ + return InlineTable(Container(), Trivia(), new=True) + + +def aot() -> AoT: + """Create an array of table. + + :Example: + + >>> doc = document() + >>> aot = aot() + >>> aot.append(item({'x': 1})) + >>> doc.append('foo', aot) + >>> print(doc.as_string()) + [[foo]] + x = 1 + """ + return AoT([]) + + +def key(k: Union[str, Iterable[str]]) -> Key: + """Create a key from a string. When a list of string is given, + it will create a dotted key. + + :Example: + + >>> doc = document() + >>> doc.append(key('foo'), 1) + >>> doc.append(key(['bar', 'baz']), 2) + >>> print(doc.as_string()) + foo = 1 + bar.baz = 2 + """ + if isinstance(k, str): + return SingleKey(k) + return DottedKey([key(_k) for _k in k]) + + +def value(raw: str) -> _Item: + """Parse a simple value from a string. + + :Example: + + >>> value("1") + 1 + >>> value("true") + True + >>> value("[1, 2, 3]") + [1, 2, 3] + """ + parser = Parser(raw) + v = parser._parse_value() + if not parser.end(): + raise parser.parse_error(UnexpectedCharError, char=parser._current) + return v + + +def key_value(src: str) -> Tuple[Key, _Item]: + """Parse a key-value pair from a string. 
+ + :Example: + + >>> key_value("foo = 1") + (Key('foo'), 1) + """ + return Parser(src)._parse_key_value() + + +def ws(src: str) -> Whitespace: + """Create a whitespace from a string.""" + return Whitespace(src, fixed=True) + + +def nl() -> Whitespace: + """Create a newline item.""" + return ws("\n") + + +def comment(string: str) -> Comment: + """Create a comment item.""" + return Comment(Trivia(comment_ws=" ", comment="# " + string)) diff --git a/src/poetry/core/_vendor/tomlkit/container.py b/src/poetry/core/_vendor/tomlkit/container.py new file mode 100644 index 0000000..4b40a13 --- /dev/null +++ b/src/poetry/core/_vendor/tomlkit/container.py @@ -0,0 +1,907 @@ +import copy + +from typing import Any +from typing import Dict +from typing import Iterator +from typing import List +from typing import Optional +from typing import Tuple +from typing import Union + +from tomlkit._compat import decode +from tomlkit._utils import merge_dicts +from tomlkit.exceptions import KeyAlreadyPresent +from tomlkit.exceptions import NonExistentKey +from tomlkit.exceptions import TOMLKitError +from tomlkit.items import AoT +from tomlkit.items import Comment +from tomlkit.items import Item +from tomlkit.items import Key +from tomlkit.items import Null +from tomlkit.items import SingleKey +from tomlkit.items import Table +from tomlkit.items import Trivia +from tomlkit.items import Whitespace +from tomlkit.items import _CustomDict +from tomlkit.items import item as _item + + +_NOT_SET = object() + + +class Container(_CustomDict): + """ + A container for items within a TOMLDocument. + + This class implements the `dict` interface with copy/deepcopy protocol. + """ + + def __init__(self, parsed: bool = False) -> None: + self._map: Dict[Key, int] = {} + self._body: List[Tuple[Optional[Key], Item]] = [] + self._parsed = parsed + self._table_keys = [] + + @property + def body(self) -> List[Tuple[Optional[Key], Item]]: + return self._body + + def unwrap(self) -> Dict[str, Any]: + unwrapped = {} + for k, v in self.items(): + if k is None: + continue + + if isinstance(k, Key): + k = k.key + + if isinstance(v, Item): + v = v.unwrap() + + if k in unwrapped: + merge_dicts(unwrapped[k], v) + else: + unwrapped[k] = v + + return unwrapped + + @property + def value(self) -> Dict[str, Any]: + d = {} + for k, v in self._body: + if k is None: + continue + + k = k.key + v = v.value + + if isinstance(v, Container): + v = v.value + + if k in d: + merge_dicts(d[k], v) + else: + d[k] = v + + return d + + def parsing(self, parsing: bool) -> None: + self._parsed = parsing + + for _, v in self._body: + if isinstance(v, Table): + v.value.parsing(parsing) + elif isinstance(v, AoT): + for t in v.body: + t.value.parsing(parsing) + + def add( + self, key: Union[Key, Item, str], item: Optional[Item] = None + ) -> "Container": + """ + Adds an item to the current Container. 
+ + :Example: + + >>> # add a key-value pair + >>> doc.add('key', 'value') + >>> # add a comment or whitespace or newline + >>> doc.add(comment('# comment')) + """ + if item is None: + if not isinstance(key, (Comment, Whitespace)): + raise ValueError( + "Non comment/whitespace items must have an associated key" + ) + + key, item = None, key + + return self.append(key, item) + + def _handle_dotted_key(self, key: Key, value: Item) -> None: + names = tuple(iter(key)) + name = names[0] + name._dotted = True + if name in self: + if not isinstance(value, Table): + table = Table(Container(True), Trivia(), False, is_super_table=True) + _table = table + for i, _name in enumerate(names[1:]): + if i == len(names) - 2: + _name.sep = key.sep + + _table.append(_name, value) + else: + _name._dotted = True + _table.append( + _name, + Table( + Container(True), + Trivia(), + False, + is_super_table=i < len(names) - 2, + ), + ) + + _table = _table[_name] + + value = table + + self.append(name, value) + + return + else: + table = Table(Container(True), Trivia(), False, is_super_table=True) + self.append(name, table) + + for i, _name in enumerate(names[1:]): + if i == len(names) - 2: + _name.sep = key.sep + + table.append(_name, value) + else: + _name._dotted = True + if _name in table.value: + table = table.value[_name] + else: + table.append( + _name, + Table( + Container(True), + Trivia(), + False, + is_super_table=i < len(names) - 2, + ), + ) + + table = table[_name] + + def append(self, key: Union[Key, str, None], item: Item) -> "Container": + """Similar to :meth:`add` but both key and value must be given.""" + if not isinstance(key, Key) and key is not None: + key = SingleKey(key) + + if not isinstance(item, Item): + item = _item(item) + + if key is not None and key.is_multi(): + self._handle_dotted_key(key, item) + return self + + if isinstance(item, (AoT, Table)) and item.name is None: + item.name = key.key + + prev = self._previous_item() + prev_ws = isinstance(prev, Whitespace) or ends_with_whitespace(prev) + if isinstance(item, Table): + if not self._parsed: + item.invalidate_display_name() + if self._body and not (self._parsed or item.trivia.indent or prev_ws): + item.trivia.indent = "\n" + + if isinstance(item, AoT) and self._body and not self._parsed: + item.invalidate_display_name() + if item and not ("\n" in item[0].trivia.indent or prev_ws): + item[0].trivia.indent = "\n" + item[0].trivia.indent + + if key is not None and key in self: + current_idx = self._map[key] + if isinstance(current_idx, tuple): + current_body_element = self._body[current_idx[-1]] + else: + current_body_element = self._body[current_idx] + + current = current_body_element[1] + + if isinstance(item, Table): + if not isinstance(current, (Table, AoT)): + raise KeyAlreadyPresent(key) + + if item.is_aot_element(): + # New AoT element found later on + # Adding it to the current AoT + if not isinstance(current, AoT): + current = AoT([current, item], parsed=self._parsed) + + self._replace(key, key, current) + else: + current.append(item) + + return self + elif current.is_aot(): + if not item.is_aot_element(): + # Tried to define a table after an AoT with the same name. 
+ raise KeyAlreadyPresent(key) + + current.append(item) + + return self + elif current.is_super_table(): + if item.is_super_table(): + # We need to merge both super tables + if ( + self._table_keys[-1] != current_body_element[0] + or key.is_dotted() + or current_body_element[0].is_dotted() + ): + if not isinstance(current_idx, tuple): + current_idx = (current_idx,) + + self._map[key] = current_idx + (len(self._body),) + self._body.append((key, item)) + self._table_keys.append(key) + + # Building a temporary proxy to check for errors + OutOfOrderTableProxy(self, self._map[key]) + + return self + + # Create a new element to replace the old one + current = copy.deepcopy(current) + for k, v in item.value.body: + current.append(k, v) + self._body[ + current_idx[-1] + if isinstance(current_idx, tuple) + else current_idx + ] = (current_body_element[0], current) + + return self + elif current_body_element[0].is_dotted(): + raise TOMLKitError("Redefinition of an existing table") + elif not item.is_super_table(): + raise KeyAlreadyPresent(key) + elif isinstance(item, AoT): + if not isinstance(current, AoT): + # Tried to define an AoT after a table with the same name. + raise KeyAlreadyPresent(key) + + for table in item.body: + current.append(table) + + return self + else: + raise KeyAlreadyPresent(key) + + is_table = isinstance(item, (Table, AoT)) + if key is not None and self._body and not self._parsed: + # If there is already at least one table in the current container + # and the given item is not a table, we need to find the last + # item that is not a table and insert after it + # If no such item exists, insert at the top of the table + key_after = None + for i, (k, v) in enumerate(self._body): + if isinstance(v, Null): + continue # Null elements are inserted after deletion + + if isinstance(v, Whitespace) and not v.is_fixed(): + continue + + if not is_table and isinstance(v, (Table, AoT)): + break + + key_after = k or i # last scalar, Array or InlineTable value + + if key_after is not None: + if isinstance(key_after, int): + if key_after + 1 < len(self._body): + return self._insert_at(key_after + 1, key, item) + else: + previous_item = self._body[-1][1] + if not ( + isinstance(previous_item, Whitespace) + or ends_with_whitespace(previous_item) + or is_table + or "\n" in previous_item.trivia.trail + ): + previous_item.trivia.trail += "\n" + else: + return self._insert_after(key_after, key, item) + else: + return self._insert_at(0, key, item) + + if key in self._map: + current_idx = self._map[key] + if isinstance(current_idx, tuple): + current_idx = current_idx[-1] + + current = self._body[current_idx][1] + if key is not None and not isinstance(current, Table): + raise KeyAlreadyPresent(key) + + # Adding sub tables to a currently existing table + if not isinstance(current_idx, tuple): + current_idx = (current_idx,) + + self._map[key] = current_idx + (len(self._body),) + else: + self._map[key] = len(self._body) + + self._body.append((key, item)) + if item.is_table(): + self._table_keys.append(key) + + if key is not None: + dict.__setitem__(self, key.key, item.value) + + return self + + def _remove_at(self, idx: int) -> None: + key = self._body[idx][0] + index = self._map.get(key) + if index is None: + raise NonExistentKey(key) + self._body[idx] = (None, Null()) + + if isinstance(index, tuple): + index = list(index) + index.remove(idx) + if len(index) == 1: + index = index.pop() + else: + index = tuple(index) + self._map[key] = index + else: + dict.__delitem__(self, key.key) + self._map.pop(key) + 
+ def remove(self, key: Union[Key, str]) -> "Container": + """Remove a key from the container.""" + if not isinstance(key, Key): + key = SingleKey(key) + + idx = self._map.pop(key, None) + if idx is None: + raise NonExistentKey(key) + + if isinstance(idx, tuple): + for i in idx: + self._body[i] = (None, Null()) + else: + self._body[idx] = (None, Null()) + + dict.__delitem__(self, key.key) + + return self + + def _insert_after( + self, key: Union[Key, str], other_key: Union[Key, str], item: Any + ) -> "Container": + if key is None: + raise ValueError("Key cannot be null in insert_after()") + + if key not in self: + raise NonExistentKey(key) + + if not isinstance(key, Key): + key = SingleKey(key) + + if not isinstance(other_key, Key): + other_key = SingleKey(other_key) + + item = _item(item) + + idx = self._map[key] + # Insert after the max index if there are many. + if isinstance(idx, tuple): + idx = max(idx) + current_item = self._body[idx][1] + if "\n" not in current_item.trivia.trail: + current_item.trivia.trail += "\n" + + # Increment indices after the current index + for k, v in self._map.items(): + if isinstance(v, tuple): + new_indices = [] + for v_ in v: + if v_ > idx: + v_ = v_ + 1 + + new_indices.append(v_) + + self._map[k] = tuple(new_indices) + elif v > idx: + self._map[k] = v + 1 + + self._map[other_key] = idx + 1 + self._body.insert(idx + 1, (other_key, item)) + + if key is not None: + dict.__setitem__(self, other_key.key, item.value) + + return self + + def _insert_at(self, idx: int, key: Union[Key, str], item: Any) -> "Container": + if idx > len(self._body) - 1: + raise ValueError(f"Unable to insert at position {idx}") + + if not isinstance(key, Key): + key = SingleKey(key) + + item = _item(item) + + if idx > 0: + previous_item = self._body[idx - 1][1] + if not ( + isinstance(previous_item, Whitespace) + or ends_with_whitespace(previous_item) + or isinstance(item, (AoT, Table)) + or "\n" in previous_item.trivia.trail + ): + previous_item.trivia.trail += "\n" + + # Increment indices after the current index + for k, v in self._map.items(): + if isinstance(v, tuple): + new_indices = [] + for v_ in v: + if v_ >= idx: + v_ = v_ + 1 + + new_indices.append(v_) + + self._map[k] = tuple(new_indices) + elif v >= idx: + self._map[k] = v + 1 + + self._map[key] = idx + self._body.insert(idx, (key, item)) + + if key is not None: + dict.__setitem__(self, key.key, item.value) + + return self + + def item(self, key: Union[Key, str]) -> Item: + """Get an item for the given key.""" + if not isinstance(key, Key): + key = SingleKey(key) + + idx = self._map.get(key, None) + if idx is None: + raise NonExistentKey(key) + + if isinstance(idx, tuple): + # The item we are getting is an out of order table + # so we need a proxy to retrieve the proper objects + # from the parent container + return OutOfOrderTableProxy(self, idx) + + return self._body[idx][1] + + def last_item(self) -> Optional[Item]: + """Get the last item.""" + if self._body: + return self._body[-1][1] + + def as_string(self) -> str: + """Render as TOML string.""" + s = "" + for k, v in self._body: + if k is not None: + if isinstance(v, Table): + s += self._render_table(k, v) + elif isinstance(v, AoT): + s += self._render_aot(k, v) + else: + s += self._render_simple_item(k, v) + else: + s += self._render_simple_item(k, v) + + return s + + def _render_table( + self, key: Key, table: Table, prefix: Optional[str] = None + ) -> str: + cur = "" + + if table.display_name is not None: + _key = table.display_name + else: + _key = 
key.as_string() + + if prefix is not None: + _key = prefix + "." + _key + + if not table.is_super_table() or ( + any( + not isinstance(v, (Table, AoT, Whitespace, Null)) + for _, v in table.value.body + ) + and not key.is_dotted() + ): + open_, close = "[", "]" + if table.is_aot_element(): + open_, close = "[[", "]]" + + newline_in_table_trivia = ( + "\n" if "\n" not in table.trivia.trail and len(table.value) > 0 else "" + ) + cur += ( + f"{table.trivia.indent}" + f"{open_}" + f"{decode(_key)}" + f"{close}" + f"{table.trivia.comment_ws}" + f"{decode(table.trivia.comment)}" + f"{table.trivia.trail}" + f"{newline_in_table_trivia}" + ) + elif table.trivia.indent == "\n": + cur += table.trivia.indent + + for k, v in table.value.body: + if isinstance(v, Table): + if v.is_super_table(): + if k.is_dotted() and not key.is_dotted(): + # Dotted key inside table + cur += self._render_table(k, v) + else: + cur += self._render_table(k, v, prefix=_key) + else: + cur += self._render_table(k, v, prefix=_key) + elif isinstance(v, AoT): + cur += self._render_aot(k, v, prefix=_key) + else: + cur += self._render_simple_item( + k, v, prefix=_key if key.is_dotted() else None + ) + + return cur + + def _render_aot(self, key, aot, prefix=None): + _key = key.as_string() + if prefix is not None: + _key = prefix + "." + _key + + cur = "" + _key = decode(_key) + for table in aot.body: + cur += self._render_aot_table(table, prefix=_key) + + return cur + + def _render_aot_table(self, table: Table, prefix: Optional[str] = None) -> str: + cur = "" + + _key = prefix or "" + + if not table.is_super_table(): + open_, close = "[[", "]]" + + cur += ( + f"{table.trivia.indent}" + f"{open_}" + f"{decode(_key)}" + f"{close}" + f"{table.trivia.comment_ws}" + f"{decode(table.trivia.comment)}" + f"{table.trivia.trail}" + ) + + for k, v in table.value.body: + if isinstance(v, Table): + if v.is_super_table(): + if k.is_dotted(): + # Dotted key inside table + cur += self._render_table(k, v) + else: + cur += self._render_table(k, v, prefix=_key) + else: + cur += self._render_table(k, v, prefix=_key) + elif isinstance(v, AoT): + cur += self._render_aot(k, v, prefix=_key) + else: + cur += self._render_simple_item(k, v) + + return cur + + def _render_simple_item(self, key, item, prefix=None): + if key is None: + return item.as_string() + + _key = key.as_string() + if prefix is not None: + _key = prefix + "." 
+ _key + + return ( + f"{item.trivia.indent}" + f"{decode(_key)}" + f"{key.sep}" + f"{decode(item.as_string())}" + f"{item.trivia.comment_ws}" + f"{decode(item.trivia.comment)}" + f"{item.trivia.trail}" + ) + + def __len__(self) -> int: + return dict.__len__(self) + + def __iter__(self) -> Iterator[str]: + return iter(dict.keys(self)) + + # Dictionary methods + def __getitem__(self, key: Union[Key, str]) -> Union[Item, "Container"]: + if not isinstance(key, Key): + key = SingleKey(key) + + idx = self._map.get(key, None) + if idx is None: + raise NonExistentKey(key) + + if isinstance(idx, tuple): + # The item we are getting is an out of order table + # so we need a proxy to retrieve the proper objects + # from the parent container + return OutOfOrderTableProxy(self, idx) + + item = self._body[idx][1] + if item.is_boolean(): + return item.value + + return item + + def __setitem__(self, key: Union[Key, str], value: Any) -> None: + if key is not None and key in self: + old_key = next(filter(lambda k: k == key, self._map)) + self._replace(old_key, key, value) + else: + self.append(key, value) + + def __delitem__(self, key: Union[Key, str]) -> None: + self.remove(key) + + def setdefault(self, key: Union[Key, str], default: Any) -> Any: + super().setdefault(key, default=default) + return self[key] + + def _replace( + self, key: Union[Key, str], new_key: Union[Key, str], value: Item + ) -> None: + if not isinstance(key, Key): + key = SingleKey(key) + + idx = self._map.get(key, None) + if idx is None: + raise NonExistentKey(key) + + self._replace_at(idx, new_key, value) + + def _replace_at( + self, idx: Union[int, Tuple[int]], new_key: Union[Key, str], value: Item + ) -> None: + value = _item(value) + + if isinstance(idx, tuple): + for i in idx[1:]: + self._body[i] = (None, Null()) + + idx = idx[0] + + k, v = self._body[idx] + if not isinstance(new_key, Key): + if ( + isinstance(value, (AoT, Table)) != isinstance(v, (AoT, Table)) + or new_key != k.key + ): + new_key = SingleKey(new_key) + else: # Inherit the sep of the old key + new_key = k + + del self._map[k] + self._map[new_key] = idx + if new_key != k: + dict.__delitem__(self, k) + + if isinstance(value, (AoT, Table)) != isinstance(v, (AoT, Table)): + # new tables should appear after all non-table values + self.remove(k) + for i in range(idx, len(self._body)): + if isinstance(self._body[i][1], (AoT, Table)): + self._insert_at(i, new_key, value) + idx = i + break + else: + idx = -1 + self.append(new_key, value) + else: + # Copying trivia + if not isinstance(value, (Whitespace, AoT)): + value.trivia.indent = v.trivia.indent + value.trivia.comment_ws = value.trivia.comment_ws or v.trivia.comment_ws + value.trivia.comment = value.trivia.comment or v.trivia.comment + value.trivia.trail = v.trivia.trail + self._body[idx] = (new_key, value) + + if hasattr(value, "invalidate_display_name"): + value.invalidate_display_name() # type: ignore[attr-defined] + + if isinstance(value, Table): + # Insert a cosmetic new line for tables if: + # - it does not have it yet OR is not followed by one + # - it is not the last item + last, _ = self._previous_item_with_index() + idx = last if idx < 0 else idx + has_ws = ends_with_whitespace(value) + next_ws = idx < last and isinstance(self._body[idx + 1][1], Whitespace) + if idx < last and not (next_ws or has_ws): + value.append(None, Whitespace("\n")) + + dict.__setitem__(self, new_key.key, value.value) + + def __str__(self) -> str: + return str(self.value) + + def __repr__(self) -> str: + return repr(self.value) + + 
def __eq__(self, other: dict) -> bool: + if not isinstance(other, dict): + return NotImplemented + + return self.value == other + + def _getstate(self, protocol): + return (self._parsed,) + + def __reduce__(self): + return self.__reduce_ex__(2) + + def __reduce_ex__(self, protocol): + return ( + self.__class__, + self._getstate(protocol), + (self._map, self._body, self._parsed, self._table_keys), + ) + + def __setstate__(self, state): + self._map = state[0] + self._body = state[1] + self._parsed = state[2] + self._table_keys = state[3] + + for key, item in self._body: + if key is not None: + dict.__setitem__(self, key.key, item.value) + + def copy(self) -> "Container": + return copy.copy(self) + + def __copy__(self) -> "Container": + c = self.__class__(self._parsed) + for k, v in dict.items(self): + dict.__setitem__(c, k, v) + + c._body += self.body + c._map.update(self._map) + + return c + + def _previous_item_with_index( + self, idx: Optional[int] = None, ignore=(Null,) + ) -> Optional[Tuple[int, Item]]: + """Find the immediate previous item before index ``idx``""" + if idx is None or idx > len(self._body): + idx = len(self._body) + for i in range(idx - 1, -1, -1): + v = self._body[i][-1] + if not isinstance(v, ignore): + return i, v + return None + + def _previous_item( + self, idx: Optional[int] = None, ignore=(Null,) + ) -> Optional[Item]: + """Find the immediate previous item before index ``idx``. + If ``idx`` is not given, the last item is returned. + """ + prev = self._previous_item_with_index(idx, ignore) + return prev[-1] if prev else None + + +class OutOfOrderTableProxy(_CustomDict): + def __init__(self, container: Container, indices: Tuple[int]) -> None: + self._container = container + self._internal_container = Container(True) + self._tables = [] + self._tables_map = {} + + for i in indices: + _, item = self._container._body[i] + + if isinstance(item, Table): + self._tables.append(item) + table_idx = len(self._tables) - 1 + for k, v in item.value.body: + self._internal_container.append(k, v) + self._tables_map[k] = table_idx + if k is not None: + dict.__setitem__(self, k.key, v) + + def unwrap(self) -> str: + return self._internal_container.unwrap() + + @property + def value(self): + return self._internal_container.value + + def __getitem__(self, key: Union[Key, str]) -> Any: + if key not in self._internal_container: + raise NonExistentKey(key) + + return self._internal_container[key] + + def __setitem__(self, key: Union[Key, str], item: Any) -> None: + if key in self._tables_map: + table = self._tables[self._tables_map[key]] + table[key] = item + elif self._tables: + table = self._tables[0] + table[key] = item + else: + self._container[key] = item + + self._internal_container[key] = item + if key is not None: + dict.__setitem__(self, key, item) + + def _remove_table(self, table: Table) -> None: + """Remove table from the parent container""" + self._tables.remove(table) + for idx, item in enumerate(self._container._body): + if item[1] is table: + self._container._remove_at(idx) + break + + def __delitem__(self, key: Union[Key, str]) -> None: + if key in self._tables_map: + table = self._tables[self._tables_map[key]] + del table[key] + if not table and len(self._tables) > 1: + self._remove_table(table) + del self._tables_map[key] + else: + raise NonExistentKey(key) + + del self._internal_container[key] + if key is not None: + dict.__delitem__(self, key) + + def __iter__(self) -> Iterator[str]: + return iter(dict.keys(self)) + + def __len__(self) -> int: + return 
dict.__len__(self) + + def setdefault(self, key: Union[Key, str], default: Any) -> Any: + super().setdefault(key, default=default) + return self[key] + + +def ends_with_whitespace(it: Any) -> bool: + """Returns ``True`` if the given item ``it`` is a ``Table`` or ``AoT`` object + ending with a ``Whitespace``. + """ + return ( + isinstance(it, Table) and isinstance(it.value._previous_item(), Whitespace) + ) or (isinstance(it, AoT) and len(it) > 0 and isinstance(it[-1], Whitespace)) diff --git a/src/poetry/core/_vendor/tomlkit/exceptions.py b/src/poetry/core/_vendor/tomlkit/exceptions.py new file mode 100644 index 0000000..3147ca2 --- /dev/null +++ b/src/poetry/core/_vendor/tomlkit/exceptions.py @@ -0,0 +1,227 @@ +from typing import Collection +from typing import Optional + + +class TOMLKitError(Exception): + + pass + + +class ParseError(ValueError, TOMLKitError): + """ + This error occurs when the parser encounters a syntax error + in the TOML being parsed. The error references the line and + location within the line where the error was encountered. + """ + + def __init__(self, line: int, col: int, message: Optional[str] = None) -> None: + self._line = line + self._col = col + + if message is None: + message = "TOML parse error" + + super().__init__(f"{message} at line {self._line} col {self._col}") + + @property + def line(self): + return self._line + + @property + def col(self): + return self._col + + +class MixedArrayTypesError(ParseError): + """ + An array was found that had two or more element types. + """ + + def __init__(self, line: int, col: int) -> None: + message = "Mixed types found in array" + + super().__init__(line, col, message=message) + + +class InvalidNumberError(ParseError): + """ + A numeric field was improperly specified. + """ + + def __init__(self, line: int, col: int) -> None: + message = "Invalid number" + + super().__init__(line, col, message=message) + + +class InvalidDateTimeError(ParseError): + """ + A datetime field was improperly specified. + """ + + def __init__(self, line: int, col: int) -> None: + message = "Invalid datetime" + + super().__init__(line, col, message=message) + + +class InvalidDateError(ParseError): + """ + A date field was improperly specified. + """ + + def __init__(self, line: int, col: int) -> None: + message = "Invalid date" + + super().__init__(line, col, message=message) + + +class InvalidTimeError(ParseError): + """ + A date field was improperly specified. + """ + + def __init__(self, line: int, col: int) -> None: + message = "Invalid time" + + super().__init__(line, col, message=message) + + +class InvalidNumberOrDateError(ParseError): + """ + A numeric or date field was improperly specified. + """ + + def __init__(self, line: int, col: int) -> None: + message = "Invalid number or date format" + + super().__init__(line, col, message=message) + + +class InvalidUnicodeValueError(ParseError): + """ + A unicode code was improperly specified. + """ + + def __init__(self, line: int, col: int) -> None: + message = "Invalid unicode value" + + super().__init__(line, col, message=message) + + +class UnexpectedCharError(ParseError): + """ + An unexpected character was found during parsing. + """ + + def __init__(self, line: int, col: int, char: str) -> None: + message = f"Unexpected character: {repr(char)}" + + super().__init__(line, col, message=message) + + +class EmptyKeyError(ParseError): + """ + An empty key was found during parsing. 
+ """ + + def __init__(self, line: int, col: int) -> None: + message = "Empty key" + + super().__init__(line, col, message=message) + + +class EmptyTableNameError(ParseError): + """ + An empty table name was found during parsing. + """ + + def __init__(self, line: int, col: int) -> None: + message = "Empty table name" + + super().__init__(line, col, message=message) + + +class InvalidCharInStringError(ParseError): + """ + The string being parsed contains an invalid character. + """ + + def __init__(self, line: int, col: int, char: str) -> None: + message = f"Invalid character {repr(char)} in string" + + super().__init__(line, col, message=message) + + +class UnexpectedEofError(ParseError): + """ + The TOML being parsed ended before the end of a statement. + """ + + def __init__(self, line: int, col: int) -> None: + message = "Unexpected end of file" + + super().__init__(line, col, message=message) + + +class InternalParserError(ParseError): + """ + An error that indicates a bug in the parser. + """ + + def __init__(self, line: int, col: int, message: Optional[str] = None) -> None: + msg = "Internal parser error" + if message: + msg += f" ({message})" + + super().__init__(line, col, message=msg) + + +class NonExistentKey(KeyError, TOMLKitError): + """ + A non-existent key was used. + """ + + def __init__(self, key): + message = f'Key "{key}" does not exist.' + + super().__init__(message) + + +class KeyAlreadyPresent(TOMLKitError): + """ + An already present key was used. + """ + + def __init__(self, key): + key = getattr(key, "key", key) + message = f'Key "{key}" already exists.' + + super().__init__(message) + + +class InvalidControlChar(ParseError): + def __init__(self, line: int, col: int, char: int, type: str) -> None: + display_code = "\\u00" + + if char < 16: + display_code += "0" + + display_code += hex(char)[2:] + + message = ( + "Control characters (codes less than 0x1f and 0x7f)" + f" are not allowed in {type}, " + f"use {display_code} instead" + ) + + super().__init__(line, col, message=message) + + +class InvalidStringError(ValueError, TOMLKitError): + def __init__(self, value: str, invalid_sequences: Collection[str], delimiter: str): + repr_ = repr(value)[1:-1] + super().__init__( + f"Invalid string: {delimiter}{repr_}{delimiter}. " + f"The character sequences {invalid_sequences} are invalid." 
+ ) diff --git a/src/poetry/core/_vendor/tomlkit/items.py b/src/poetry/core/_vendor/tomlkit/items.py new file mode 100644 index 0000000..77fa27d --- /dev/null +++ b/src/poetry/core/_vendor/tomlkit/items.py @@ -0,0 +1,1950 @@ +import abc +import copy +import re +import string + +from datetime import date +from datetime import datetime +from datetime import time +from datetime import tzinfo +from enum import Enum +from typing import TYPE_CHECKING +from typing import Any +from typing import Collection +from typing import Dict +from typing import Iterable +from typing import Iterator +from typing import List +from typing import Optional +from typing import Sequence +from typing import TypeVar +from typing import Union +from typing import cast +from typing import overload + +from tomlkit._compat import PY38 +from tomlkit._compat import decode +from tomlkit._utils import CONTROL_CHARS +from tomlkit._utils import escape_string +from tomlkit.exceptions import InvalidStringError + + +if TYPE_CHECKING: # pragma: no cover + # Define _CustomList and _CustomDict as a workaround for: + # https://github.com/python/mypy/issues/11427 + # + # According to this issue, the typeshed contains a "lie" + # (it adds MutableSequence to the ancestry of list and MutableMapping to + # the ancestry of dict) which completely messes with the type inference for + # Table, InlineTable, Array and Container. + # + # Importing from builtins is preferred over simple assignment, see issues: + # https://github.com/python/mypy/issues/8715 + # https://github.com/python/mypy/issues/10068 + from builtins import dict as _CustomDict # noqa: N812, TC004 + from builtins import list as _CustomList # noqa: N812, TC004 + + # Allow type annotations but break circular imports + from tomlkit import container +else: + from collections.abc import MutableMapping + from collections.abc import MutableSequence + + class _CustomList(MutableSequence, list): + """Adds MutableSequence mixin while pretending to be a builtin list""" + + class _CustomDict(MutableMapping, dict): + """Adds MutableMapping mixin while pretending to be a builtin dict""" + + +ItemT = TypeVar("ItemT", bound="Item") + + +@overload +def item( + value: bool, _parent: Optional["Item"] = ..., _sort_keys: bool = ... +) -> "Bool": + ... + + +@overload +def item( + value: int, _parent: Optional["Item"] = ..., _sort_keys: bool = ... +) -> "Integer": + ... + + +@overload +def item( + value: float, _parent: Optional["Item"] = ..., _sort_keys: bool = ... +) -> "Float": + ... + + +@overload +def item( + value: str, _parent: Optional["Item"] = ..., _sort_keys: bool = ... +) -> "String": + ... + + +@overload +def item( + value: datetime, _parent: Optional["Item"] = ..., _sort_keys: bool = ... +) -> "DateTime": + ... + + +@overload +def item( + value: date, _parent: Optional["Item"] = ..., _sort_keys: bool = ... +) -> "Date": + ... + + +@overload +def item( + value: time, _parent: Optional["Item"] = ..., _sort_keys: bool = ... +) -> "Time": + ... + + +@overload +def item( + value: Sequence[dict], _parent: Optional["Item"] = ..., _sort_keys: bool = ... +) -> "AoT": + ... + + +@overload +def item( + value: Sequence, _parent: Optional["Item"] = ..., _sort_keys: bool = ... +) -> "Array": + ... + + +@overload +def item(value: dict, _parent: "Array" = ..., _sort_keys: bool = ...) -> "InlineTable": + ... + + +@overload +def item( + value: dict, _parent: Optional["Item"] = ..., _sort_keys: bool = ... +) -> "Table": + ... 
+ + +@overload +def item( + value: ItemT, _parent: Optional["Item"] = ..., _sort_keys: bool = ... +) -> ItemT: + ... + + +def item( + value: Any, _parent: Optional["Item"] = None, _sort_keys: bool = False +) -> "Item": + """Create a TOML item from a Python object. + + :Example: + + >>> item(42) + 42 + >>> item([1, 2, 3]) + [1, 2, 3] + >>> item({'a': 1, 'b': 2}) + a = 1 + b = 2 + """ + + from tomlkit.container import Container + + if isinstance(value, Item): + return value + + if isinstance(value, bool): + return Bool(value, Trivia()) + elif isinstance(value, int): + return Integer(value, Trivia(), str(value)) + elif isinstance(value, float): + return Float(value, Trivia(), str(value)) + elif isinstance(value, dict): + table_constructor = ( + InlineTable if isinstance(_parent, (Array, InlineTable)) else Table + ) + val = table_constructor(Container(), Trivia(), False) + for k, v in sorted( + value.items(), + key=lambda i: (isinstance(i[1], dict), i[0] if _sort_keys else 1), + ): + val[k] = item(v, _parent=val, _sort_keys=_sort_keys) + + return val + elif isinstance(value, (list, tuple)): + if ( + value + and all(isinstance(v, dict) for v in value) + and (_parent is None or isinstance(_parent, Table)) + ): + a = AoT([]) + table_constructor = Table + else: + a = Array([], Trivia()) + table_constructor = InlineTable + + for v in value: + if isinstance(v, dict): + table = table_constructor(Container(), Trivia(), True) + + for k, _v in sorted( + v.items(), + key=lambda i: (isinstance(i[1], dict), i[0] if _sort_keys else 1), + ): + i = item(_v, _parent=table, _sort_keys=_sort_keys) + if isinstance(table, InlineTable): + i.trivia.trail = "" + + table[k] = i + + v = table + + a.append(v) + + return a + elif isinstance(value, str): + return String.from_raw(value) + elif isinstance(value, datetime): + return DateTime( + value.year, + value.month, + value.day, + value.hour, + value.minute, + value.second, + value.microsecond, + value.tzinfo, + Trivia(), + value.isoformat().replace("+00:00", "Z"), + ) + elif isinstance(value, date): + return Date(value.year, value.month, value.day, Trivia(), value.isoformat()) + elif isinstance(value, time): + return Time( + value.hour, + value.minute, + value.second, + value.microsecond, + value.tzinfo, + Trivia(), + value.isoformat(), + ) + + raise ValueError(f"Invalid type {type(value)}") + + +class StringType(Enum): + # Single Line Basic + SLB = '"' + # Multi Line Basic + MLB = '"""' + # Single Line Literal + SLL = "'" + # Multi Line Literal + MLL = "'''" + + @classmethod + def select(cls, literal=False, multiline=False) -> "StringType": + return { + (False, False): cls.SLB, + (False, True): cls.MLB, + (True, False): cls.SLL, + (True, True): cls.MLL, + }[(literal, multiline)] + + @property + def escaped_sequences(self) -> Collection[str]: + # https://toml.io/en/v1.0.0#string + escaped_in_basic = CONTROL_CHARS | {"\\"} + allowed_in_multiline = {"\n", "\r"} + return { + StringType.SLB: escaped_in_basic | {'"'}, + StringType.MLB: (escaped_in_basic | {'"""'}) - allowed_in_multiline, + StringType.SLL: (), + StringType.MLL: (), + }[self] + + @property + def invalid_sequences(self) -> Collection[str]: + # https://toml.io/en/v1.0.0#string + forbidden_in_literal = CONTROL_CHARS - {"\t"} + allowed_in_multiline = {"\n", "\r"} + return { + StringType.SLB: (), + StringType.MLB: (), + StringType.SLL: forbidden_in_literal | {"'"}, + StringType.MLL: (forbidden_in_literal | {"'''"}) - allowed_in_multiline, + }[self] + + @property + def unit(self) -> str: + return self.value[0] + + 
def is_basic(self) -> bool: + return self in {StringType.SLB, StringType.MLB} + + def is_literal(self) -> bool: + return self in {StringType.SLL, StringType.MLL} + + def is_singleline(self) -> bool: + return self in {StringType.SLB, StringType.SLL} + + def is_multiline(self) -> bool: + return self in {StringType.MLB, StringType.MLL} + + def toggle(self) -> "StringType": + return { + StringType.SLB: StringType.MLB, + StringType.MLB: StringType.SLB, + StringType.SLL: StringType.MLL, + StringType.MLL: StringType.SLL, + }[self] + + +class BoolType(Enum): + TRUE = "true" + FALSE = "false" + + def __bool__(self): + return {BoolType.TRUE: True, BoolType.FALSE: False}[self] + + def __iter__(self): + return iter(self.value) + + def __len__(self): + return len(self.value) + + +class Trivia: + """ + Trivia information (aka metadata). + """ + + def __init__( + self, + indent: str = None, + comment_ws: str = None, + comment: str = None, + trail: str = None, + ) -> None: + # Whitespace before a value. + self.indent = indent or "" + # Whitespace after a value, but before a comment. + self.comment_ws = comment_ws or "" + # Comment, starting with # character, or empty string if no comment. + self.comment = comment or "" + # Trailing newline. + if trail is None: + trail = "\n" + + self.trail = trail + + def copy(self) -> "Trivia": + return type(self)(self.indent, self.comment_ws, self.comment, self.trail) + + +class KeyType(Enum): + """ + The type of a Key. + + Keys can be bare (unquoted), or quoted using basic ("), or literal (') + quotes following the same escaping rules as single-line StringType. + """ + + Bare = "" + Basic = '"' + Literal = "'" + + +class Key(abc.ABC): + """Base class for a key""" + + sep: str + _original: str + _keys: List["SingleKey"] + _dotted: bool + key: str + + @abc.abstractmethod + def __hash__(self) -> int: + pass + + @abc.abstractmethod + def __eq__(self, __o: object) -> bool: + pass + + def is_dotted(self) -> bool: + """If the key is followed by other keys""" + return self._dotted + + def __iter__(self) -> Iterator["SingleKey"]: + return iter(self._keys) + + def concat(self, other: "Key") -> "DottedKey": + """Concatenate keys into a dotted key""" + keys = self._keys + other._keys + return DottedKey(keys, sep=self.sep) + + def is_multi(self) -> bool: + """Check if the key contains multiple keys""" + return len(self._keys) > 1 + + def as_string(self) -> str: + """The TOML representation""" + return self._original + + def __str__(self) -> str: + return self.as_string() + + def __repr__(self) -> str: + return f"" + + +class SingleKey(Key): + """A single key""" + + def __init__( + self, + k: str, + t: Optional[KeyType] = None, + sep: Optional[str] = None, + original: Optional[str] = None, + ) -> None: + if t is None: + if not k or any( + c not in string.ascii_letters + string.digits + "-" + "_" for c in k + ): + t = KeyType.Basic + else: + t = KeyType.Bare + + self.t = t + if sep is None: + sep = " = " + + self.sep = sep + self.key = k + if original is None: + key_str = escape_string(k) if t == KeyType.Basic else k + original = f"{t.value}{key_str}{t.value}" + + self._original = original + self._keys = [self] + self._dotted = False + + @property + def delimiter(self) -> str: + """The delimiter: double quote/single quote/none""" + return self.t.value + + def is_bare(self) -> bool: + """Check if the key is bare""" + return self.t == KeyType.Bare + + def __hash__(self) -> int: + return hash(self.key) + + def __eq__(self, other: Any) -> bool: + if isinstance(other, Key): + return 
isinstance(other, SingleKey) and self.key == other.key + + return self.key == other + + +class DottedKey(Key): + def __init__( + self, + keys: Iterable[Key], + sep: Optional[str] = None, + original: Optional[str] = None, + ) -> None: + self._keys = list(keys) + if original is None: + original = ".".join(k.as_string() for k in self._keys) + + self.sep = " = " if sep is None else sep + self._original = original + self._dotted = False + self.key = ".".join(k.key for k in self._keys) + + def __hash__(self) -> int: + return hash(tuple(self._keys)) + + def __eq__(self, __o: object) -> bool: + return isinstance(__o, DottedKey) and self._keys == __o._keys + + +class Item: + """ + An item within a TOML document. + """ + + def __init__(self, trivia: Trivia) -> None: + self._trivia = trivia + + @property + def trivia(self) -> Trivia: + """The trivia element associated with this item""" + return self._trivia + + @property + def discriminant(self) -> int: + raise NotImplementedError() + + def as_string(self) -> str: + """The TOML representation""" + raise NotImplementedError() + + @property + def value(self) -> Any: + return self + + def unwrap(self) -> Any: + """Returns as pure python object (ppo)""" + raise NotImplementedError() + + # Helpers + + def comment(self, comment: str) -> "Item": + """Attach a comment to this item""" + if not comment.strip().startswith("#"): + comment = "# " + comment + + self._trivia.comment_ws = " " + self._trivia.comment = comment + + return self + + def indent(self, indent: int) -> "Item": + """Indent this item with given number of spaces""" + if self._trivia.indent.startswith("\n"): + self._trivia.indent = "\n" + " " * indent + else: + self._trivia.indent = " " * indent + + return self + + def is_boolean(self) -> bool: + return isinstance(self, Bool) + + def is_table(self) -> bool: + return isinstance(self, Table) + + def is_inline_table(self) -> bool: + return isinstance(self, InlineTable) + + def is_aot(self) -> bool: + return isinstance(self, AoT) + + def _getstate(self, protocol=3): + return (self._trivia,) + + def __reduce__(self): + return self.__reduce_ex__(2) + + def __reduce_ex__(self, protocol): + return self.__class__, self._getstate(protocol) + + +class Whitespace(Item): + """ + A whitespace literal. + """ + + def __init__(self, s: str, fixed: bool = False) -> None: + self._s = s + self._fixed = fixed + + @property + def s(self) -> str: + return self._s + + @property + def value(self) -> str: + """The wrapped string of the whitespace""" + return self._s + + @property + def trivia(self) -> Trivia: + raise RuntimeError("Called trivia on a Whitespace variant.") + + @property + def discriminant(self) -> int: + return 0 + + def is_fixed(self) -> bool: + """If the whitespace is fixed, it can't be merged or discarded from the output.""" + return self._fixed + + def as_string(self) -> str: + return self._s + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {repr(self._s)}>" + + def _getstate(self, protocol=3): + return self._s, self._fixed + + +class Comment(Item): + """ + A comment literal. + """ + + @property + def discriminant(self) -> int: + return 1 + + def as_string(self) -> str: + return ( + f"{self._trivia.indent}{decode(self._trivia.comment)}{self._trivia.trail}" + ) + + def __str__(self) -> str: + return f"{self._trivia.indent}{decode(self._trivia.comment)}" + + +class Integer(int, Item): + """ + An integer literal. 
+ """ + + def __new__(cls, value: int, trivia: Trivia, raw: str) -> "Integer": + return super().__new__(cls, value) + + def __init__(self, _: int, trivia: Trivia, raw: str) -> None: + super().__init__(trivia) + + self._raw = raw + self._sign = False + + if re.match(r"^[+\-]\d+$", raw): + self._sign = True + + def unwrap(self) -> int: + return int(self) + + @property + def discriminant(self) -> int: + return 2 + + @property + def value(self) -> int: + """The wrapped integer value""" + return self + + def as_string(self) -> str: + return self._raw + + def __add__(self, other): + return self._new(int(self._raw) + other) + + def __radd__(self, other): + result = super().__radd__(other) + + if isinstance(other, Integer): + return self._new(result) + + return result + + def __sub__(self, other): + result = super().__sub__(other) + + return self._new(result) + + def __rsub__(self, other): + result = super().__rsub__(other) + + if isinstance(other, Integer): + return self._new(result) + + return result + + def _new(self, result): + raw = str(result) + if self._sign: + sign = "+" if result >= 0 else "-" + raw = sign + raw + + return Integer(result, self._trivia, raw) + + def _getstate(self, protocol=3): + return int(self), self._trivia, self._raw + + +class Float(float, Item): + """ + A float literal. + """ + + def __new__(cls, value: float, trivia: Trivia, raw: str) -> Integer: + return super().__new__(cls, value) + + def __init__(self, _: float, trivia: Trivia, raw: str) -> None: + super().__init__(trivia) + + self._raw = raw + self._sign = False + + if re.match(r"^[+\-].+$", raw): + self._sign = True + + def unwrap(self) -> float: + return float(self) + + @property + def discriminant(self) -> int: + return 3 + + @property + def value(self) -> float: + """The wrapped float value""" + return self + + def as_string(self) -> str: + return self._raw + + def __add__(self, other): + result = super().__add__(other) + + return self._new(result) + + def __radd__(self, other): + result = super().__radd__(other) + + if isinstance(other, Float): + return self._new(result) + + return result + + def __sub__(self, other): + result = super().__sub__(other) + + return self._new(result) + + def __rsub__(self, other): + result = super().__rsub__(other) + + if isinstance(other, Float): + return self._new(result) + + return result + + def _new(self, result): + raw = str(result) + + if self._sign: + sign = "+" if result >= 0 else "-" + raw = sign + raw + + return Float(result, self._trivia, raw) + + def _getstate(self, protocol=3): + return float(self), self._trivia, self._raw + + +class Bool(Item): + """ + A boolean literal. + """ + + def __init__(self, t: int, trivia: Trivia) -> None: + super().__init__(trivia) + + self._value = bool(t) + + def unwrap(self) -> bool: + return bool(self) + + @property + def discriminant(self) -> int: + return 4 + + @property + def value(self) -> bool: + """The wrapped boolean value""" + return self._value + + def as_string(self) -> str: + return str(self._value).lower() + + def _getstate(self, protocol=3): + return self._value, self._trivia + + def __bool__(self): + return self._value + + __nonzero__ = __bool__ + + def __eq__(self, other): + if not isinstance(other, bool): + return NotImplemented + + return other == self._value + + def __hash__(self): + return hash(self._value) + + def __repr__(self): + return repr(self._value) + + +class DateTime(Item, datetime): + """ + A datetime literal. 
+ """ + + def __new__( + cls, + year: int, + month: int, + day: int, + hour: int, + minute: int, + second: int, + microsecond: int, + tzinfo: Optional[tzinfo], + *_: Any, + **kwargs: Any, + ) -> datetime: + return datetime.__new__( + cls, + year, + month, + day, + hour, + minute, + second, + microsecond, + tzinfo=tzinfo, + **kwargs, + ) + + def __init__( + self, + year: int, + month: int, + day: int, + hour: int, + minute: int, + second: int, + microsecond: int, + tzinfo: Optional[tzinfo], + trivia: Optional[Trivia] = None, + raw: Optional[str] = None, + **kwargs: Any, + ) -> None: + super().__init__(trivia or Trivia()) + + self._raw = raw or self.isoformat() + + def unwrap(self) -> datetime: + ( + year, + month, + day, + hour, + minute, + second, + microsecond, + tzinfo, + _, + _, + ) = self._getstate() + return datetime(year, month, day, hour, minute, second, microsecond, tzinfo) + + @property + def discriminant(self) -> int: + return 5 + + @property + def value(self) -> datetime: + return self + + def as_string(self) -> str: + return self._raw + + def __add__(self, other): + if PY38: + result = datetime( + self.year, + self.month, + self.day, + self.hour, + self.minute, + self.second, + self.microsecond, + self.tzinfo, + ).__add__(other) + else: + result = super().__add__(other) + + return self._new(result) + + def __sub__(self, other): + if PY38: + result = datetime( + self.year, + self.month, + self.day, + self.hour, + self.minute, + self.second, + self.microsecond, + self.tzinfo, + ).__sub__(other) + else: + result = super().__sub__(other) + + if isinstance(result, datetime): + result = self._new(result) + + return result + + def replace(self, *args: Any, **kwargs: Any) -> datetime: + return self._new(super().replace(*args, **kwargs)) + + def astimezone(self, tz: tzinfo) -> datetime: + result = super().astimezone(tz) + if PY38: + return result + return self._new(result) + + def _new(self, result) -> "DateTime": + raw = result.isoformat() + + return DateTime( + result.year, + result.month, + result.day, + result.hour, + result.minute, + result.second, + result.microsecond, + result.tzinfo, + self._trivia, + raw, + ) + + def _getstate(self, protocol=3): + return ( + self.year, + self.month, + self.day, + self.hour, + self.minute, + self.second, + self.microsecond, + self.tzinfo, + self._trivia, + self._raw, + ) + + +class Date(Item, date): + """ + A date literal. 
+ """ + + def __new__(cls, year: int, month: int, day: int, *_: Any) -> date: + return date.__new__(cls, year, month, day) + + def __init__( + self, year: int, month: int, day: int, trivia: Trivia, raw: str + ) -> None: + super().__init__(trivia) + + self._raw = raw + + def unwrap(self) -> date: + (year, month, day, _, _) = self._getstate() + return date(year, month, day) + + @property + def discriminant(self) -> int: + return 6 + + @property + def value(self) -> date: + return self + + def as_string(self) -> str: + return self._raw + + def __add__(self, other): + if PY38: + result = date(self.year, self.month, self.day).__add__(other) + else: + result = super().__add__(other) + + return self._new(result) + + def __sub__(self, other): + if PY38: + result = date(self.year, self.month, self.day).__sub__(other) + else: + result = super().__sub__(other) + + if isinstance(result, date): + result = self._new(result) + + return result + + def replace(self, *args: Any, **kwargs: Any) -> date: + return self._new(super().replace(*args, **kwargs)) + + def _new(self, result): + raw = result.isoformat() + + return Date(result.year, result.month, result.day, self._trivia, raw) + + def _getstate(self, protocol=3): + return (self.year, self.month, self.day, self._trivia, self._raw) + + +class Time(Item, time): + """ + A time literal. + """ + + def __new__( + cls, + hour: int, + minute: int, + second: int, + microsecond: int, + tzinfo: Optional[tzinfo], + *_: Any, + ) -> time: + return time.__new__(cls, hour, minute, second, microsecond, tzinfo) + + def __init__( + self, + hour: int, + minute: int, + second: int, + microsecond: int, + tzinfo: Optional[tzinfo], + trivia: Trivia, + raw: str, + ) -> None: + super().__init__(trivia) + + self._raw = raw + + def unwrap(self) -> time: + (hour, minute, second, microsecond, tzinfo, _, _) = self._getstate() + return time(hour, minute, second, microsecond, tzinfo) + + @property + def discriminant(self) -> int: + return 7 + + @property + def value(self) -> time: + return self + + def as_string(self) -> str: + return self._raw + + def replace(self, *args: Any, **kwargs: Any) -> time: + return self._new(super().replace(*args, **kwargs)) + + def _new(self, result): + raw = result.isoformat() + + return Time( + result.hour, + result.minute, + result.second, + result.microsecond, + result.tzinfo, + self._trivia, + raw, + ) + + def _getstate(self, protocol: int = 3) -> tuple: + return ( + self.hour, + self.minute, + self.second, + self.microsecond, + self.tzinfo, + self._trivia, + self._raw, + ) + + +class _ArrayItemGroup: + __slots__ = ("value", "indent", "comma", "comment") + + def __init__( + self, + value: Optional[Item] = None, + indent: Optional[Whitespace] = None, + comma: Optional[Whitespace] = None, + comment: Optional[Comment] = None, + ) -> None: + self.value = value + self.indent = indent + self.comma = comma + self.comment = comment + + def __iter__(self) -> Iterator[Item]: + return filter( + lambda x: x is not None, (self.indent, self.value, self.comma, self.comment) + ) + + def __repr__(self) -> str: + return repr(tuple(self)) + + def is_whitespace(self) -> bool: + return self.value is None and self.comment is None + + def __bool__(self) -> bool: + try: + next(iter(self)) + except StopIteration: + return False + return True + + +class Array(Item, _CustomList): + """ + An array literal + """ + + def __init__( + self, value: List[Item], trivia: Trivia, multiline: bool = False + ) -> None: + super().__init__(trivia) + list.__init__( + self, + [v.value for v in 
value if not isinstance(v, (Whitespace, Comment, Null))], + ) + self._index_map: Dict[int, int] = {} + self._value = self._group_values(value) + self._multiline = multiline + self._reindex() + + def _group_values(self, value: List[Item]) -> List[_ArrayItemGroup]: + """Group the values into (indent, value, comma, comment) tuples""" + groups = [] + this_group = _ArrayItemGroup() + for item in value: + if isinstance(item, Whitespace): + if "," not in item.s: + groups.append(this_group) + this_group = _ArrayItemGroup(indent=item) + else: + if this_group.value is None: + # when comma is met and no value is provided, add a dummy Null + this_group.value = Null() + this_group.comma = item + elif isinstance(item, Comment): + if this_group.value is None: + this_group.value = Null() + this_group.comment = item + elif this_group.value is None: + this_group.value = item + else: + groups.append(this_group) + this_group = _ArrayItemGroup(value=item) + groups.append(this_group) + return [group for group in groups if group] + + def unwrap(self) -> List[Any]: + unwrapped = [] + for v in self: + if isinstance(v, Item): + unwrapped.append(v.unwrap()) + else: + unwrapped.append(v) + return unwrapped + + @property + def discriminant(self) -> int: + return 8 + + @property + def value(self) -> list: + return self + + def _iter_items(self) -> Iterator[Item]: + for v in self._value: + yield from v + + def multiline(self, multiline: bool) -> "Array": + """Change the array to display in multiline or not. + + :Example: + + >>> a = item([1, 2, 3]) + >>> print(a.as_string()) + [1, 2, 3] + >>> print(a.multiline(True).as_string()) + [ + 1, + 2, + 3, + ] + """ + self._multiline = multiline + + return self + + def as_string(self) -> str: + if not self._multiline or not self._value: + return f'[{"".join(v.as_string() for v in self._iter_items())}]' + + s = "[\n" + s += "".join( + self.trivia.indent + + " " * 4 + + v.value.as_string() + + ("," if not isinstance(v.value, Null) else "") + + (v.comment.as_string() if v.comment is not None else "") + + "\n" + for v in self._value + if v.value is not None + ) + s += self.trivia.indent + "]" + + return s + + def _reindex(self) -> None: + self._index_map.clear() + index = 0 + for i, v in enumerate(self._value): + if v.value is None or isinstance(v.value, Null): + continue + self._index_map[index] = i + index += 1 + + def add_line( + self, + *items: Any, + indent: str = " ", + comment: Optional[str] = None, + add_comma: bool = True, + newline: bool = True, + ) -> None: + """Add multiple items in a line to control the format precisely. + When add_comma is True, only accept actual values and + ", " will be added between values automatically. 
+ + :Example: + + >>> a = array() + >>> a.add_line(1, 2, 3) + >>> a.add_line(4, 5, 6) + >>> a.add_line(indent="") + >>> print(a.as_string()) + [ + 1, 2, 3, + 4, 5, 6, + ] + """ + new_values: List[Item] = [] + first_indent = f"\n{indent}" if newline else indent + if first_indent: + new_values.append(Whitespace(first_indent)) + whitespace = "" + data_values = [] + for i, el in enumerate(items): + it = item(el, _parent=self) + if isinstance(it, Comment) or add_comma and isinstance(el, Whitespace): + raise ValueError(f"item type {type(it)} is not allowed in add_line") + if not isinstance(it, Whitespace): + if whitespace: + new_values.append(Whitespace(whitespace)) + whitespace = "" + new_values.append(it) + data_values.append(it.value) + if add_comma: + new_values.append(Whitespace(",")) + if i != len(items) - 1: + new_values.append(Whitespace(" ")) + elif "," not in it.s: + whitespace += it.s + else: + new_values.append(it) + if whitespace: + new_values.append(Whitespace(whitespace)) + if comment: + indent = " " if items else "" + new_values.append( + Comment(Trivia(indent=indent, comment=f"# {comment}", trail="")) + ) + list.extend(self, data_values) + if len(self._value) > 0: + last_item = self._value[-1] + last_value_item = next( + ( + v + for v in self._value[::-1] + if v.value is not None and not isinstance(v.value, Null) + ), + None, + ) + if last_value_item is not None: + last_value_item.comma = Whitespace(",") + if last_item.is_whitespace(): + self._value[-1:-1] = self._group_values(new_values) + else: + self._value.extend(self._group_values(new_values)) + else: + self._value.extend(self._group_values(new_values)) + self._reindex() + + def clear(self) -> None: + """Clear the array.""" + list.clear(self) + self._index_map.clear() + self._value.clear() + + def __len__(self) -> int: + return list.__len__(self) + + def __getitem__(self, key: Union[int, slice]) -> Any: + return list.__getitem__(self, key) + + def __setitem__(self, key: Union[int, slice], value: Any) -> Any: + it = item(value, _parent=self) + list.__setitem__(self, key, it.value) + if isinstance(key, slice): + raise ValueError("slice assignment is not supported") + if key < 0: + key += len(self) + self._value[self._index_map[key]].value = it + + def insert(self, pos: int, value: Any) -> None: + it = item(value, _parent=self) + length = len(self) + if not isinstance(it, (Comment, Whitespace)): + list.insert(self, pos, it.value) + if pos < 0: + pos += length + if pos < 0: + pos = 0 + + idx = 0 # insert position of the self._value list + default_indent = " " + if pos < length: + try: + idx = self._index_map[pos] + except KeyError as e: + raise IndexError("list index out of range") from e + else: + idx = len(self._value) + if idx >= 1 and self._value[idx - 1].is_whitespace(): + # The last item is a pure whitespace(\n ), insert before it + idx -= 1 + if ( + self._value[idx].indent is not None + and "\n" in self._value[idx].indent.s + ): + default_indent = "\n " + indent: Optional[Item] = None + comma: Optional[Item] = Whitespace(",") if pos < length else None + if idx < len(self._value) and not self._value[idx].is_whitespace(): + # Prefer to copy the indentation from the item after + indent = self._value[idx].indent + if idx > 0: + last_item = self._value[idx - 1] + if indent is None: + indent = last_item.indent + if not isinstance(last_item.value, Null) and "\n" in default_indent: + # Copy the comma from the last item if 1) it contains a value and + # 2) the array is multiline + comma = last_item.comma + if last_item.comma is 
None and not isinstance(last_item.value, Null): + # Add comma to the last item to separate it from the following items. + last_item.comma = Whitespace(",") + if indent is None and (idx > 0 or "\n" in default_indent): + # apply default indent if it isn't the first item or the array is multiline. + indent = Whitespace(default_indent) + new_item = _ArrayItemGroup(value=it, indent=indent, comma=comma) + self._value.insert(idx, new_item) + self._reindex() + + def __delitem__(self, key: Union[int, slice]): + length = len(self) + list.__delitem__(self, key) + + if isinstance(key, slice): + indices_to_remove = list( + range(key.start or 0, key.stop or length, key.step or 1) + ) + else: + indices_to_remove = [length + key if key < 0 else key] + for i in sorted(indices_to_remove, reverse=True): + try: + idx = self._index_map[i] + except KeyError as e: + if not isinstance(key, slice): + raise IndexError("list index out of range") from e + else: + del self._value[idx] + if ( + idx == 0 + and len(self._value) > 0 + and "\n" not in self._value[idx].indent.s + ): + # Remove the indentation of the first item if not newline + self._value[idx].indent = None + if len(self._value) > 0: + v = self._value[-1] + if not v.is_whitespace(): + # remove the comma of the last item + v.comma = None + + self._reindex() + + def __str__(self): + return str([v.value.value for v in self._iter_items() if v.value is not None]) + + def _getstate(self, protocol=3): + return list(self._iter_items()), self._trivia, self._multiline + + +AT = TypeVar("AT", bound="AbstractTable") + + +class AbstractTable(Item, _CustomDict): + """Common behaviour of both :class:`Table` and :class:`InlineTable`""" + + def __init__(self, value: "container.Container", trivia: Trivia): + Item.__init__(self, trivia) + + self._value = value + + for k, v in self._value.body: + if k is not None: + dict.__setitem__(self, k.key, v) + + def unwrap(self) -> Dict[str, Any]: + unwrapped = {} + for k, v in self.items(): + if isinstance(k, Key): + k = k.key + if isinstance(v, Item): + v = v.unwrap() + unwrapped[k] = v + + return unwrapped + + @property + def value(self) -> "container.Container": + return self._value + + @overload + def append(self: AT, key: None, value: Union[Comment, Whitespace]) -> AT: + ... + + @overload + def append(self: AT, key: Union[Key, str], value: Any) -> AT: + ... + + def append(self, key, value): + raise NotImplementedError + + @overload + def add(self: AT, value: Union[Comment, Whitespace]) -> AT: + ... + + @overload + def add(self: AT, key: Union[Key, str], value: Any) -> AT: + ... 
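+ # --- Illustrative usage sketch (editor's note; not part of the upstream
+ # vendored file). How the add()/append() API declared above is typically
+ # driven, assuming the public tomlkit helpers table(), comment() and nl()
+ # provided elsewhere in this package:
+ #
+ #     t = table()
+ #     t.add(comment("server settings"))  # comments/whitespace need no key
+ #     t.add("host", "127.0.0.1")         # key/value pairs are routed to append()
+ #     t.append("port", 8080)
+ #     t.add(nl())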
+ + def add(self, key, value=None): + if value is None: + if not isinstance(key, (Comment, Whitespace)): + msg = "Non comment/whitespace items must have an associated key" + raise ValueError(msg) + + key, value = None, key + + return self.append(key, value) + + def remove(self: AT, key: Union[Key, str]) -> AT: + self._value.remove(key) + + if isinstance(key, Key): + key = key.key + + if key is not None: + dict.__delitem__(self, key) + + return self + + def setdefault(self, key: Union[Key, str], default: Any) -> Any: + super().setdefault(key, default) + return self[key] + + def __str__(self): + return str(self.value) + + def copy(self: AT) -> AT: + return copy.copy(self) + + def __repr__(self) -> str: + return repr(self.value) + + def __iter__(self) -> Iterator[str]: + return iter(self._value) + + def __len__(self) -> int: + return len(self._value) + + def __delitem__(self, key: Union[Key, str]) -> None: + self.remove(key) + + def __getitem__(self, key: Union[Key, str]) -> Item: + return cast(Item, self._value[key]) + + def __setitem__(self, key: Union[Key, str], value: Any) -> None: + if not isinstance(value, Item): + value = item(value, _parent=self) + + is_replace = key in self + self._value[key] = value + + if key is not None: + dict.__setitem__(self, key, value) + + if is_replace: + return + m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent) + if not m: + return + + indent = m.group(1) + + if not isinstance(value, Whitespace): + m = re.match("(?s)^([^ ]*)(.*)$", value.trivia.indent) + if not m: + value.trivia.indent = indent + else: + value.trivia.indent = m.group(1) + indent + m.group(2) + + +class Table(AbstractTable): + """ + A table literal. + """ + + def __init__( + self, + value: "container.Container", + trivia: Trivia, + is_aot_element: bool, + is_super_table: Optional[bool] = None, + name: Optional[str] = None, + display_name: Optional[str] = None, + ) -> None: + super().__init__(value, trivia) + + self.name = name + self.display_name = display_name + self._is_aot_element = is_aot_element + self._is_super_table = is_super_table + + @property + def discriminant(self) -> int: + return 9 + + def __copy__(self) -> "Table": + return type(self)( + self._value.copy(), + self._trivia.copy(), + self._is_aot_element, + self._is_super_table, + self.name, + self.display_name, + ) + + def append(self, key, _item): + """ + Appends a (key, item) to the table. 
+ """ + if not isinstance(_item, Item): + _item = item(_item, _parent=self) + + self._value.append(key, _item) + + if isinstance(key, Key): + key = next(iter(key)).key + _item = self._value[key] + + if key is not None: + dict.__setitem__(self, key, _item) + + m = re.match(r"(?s)^[^ ]*([ ]+).*$", self._trivia.indent) + if not m: + return self + + indent = m.group(1) + + if not isinstance(_item, Whitespace): + m = re.match("(?s)^([^ ]*)(.*)$", _item.trivia.indent) + if not m: + _item.trivia.indent = indent + else: + _item.trivia.indent = m.group(1) + indent + m.group(2) + + return self + + def raw_append(self, key: Union[Key, str], _item: Any) -> "Table": + """Similar to :meth:`append` but does not copy indentation.""" + if not isinstance(_item, Item): + _item = item(_item) + + self._value.append(key, _item) + + if isinstance(key, Key): + key = next(iter(key)).key + _item = self._value[key] + + if key is not None: + dict.__setitem__(self, key, _item) + + return self + + def is_aot_element(self) -> bool: + """True if the table is the direct child of an AOT element.""" + return self._is_aot_element + + def is_super_table(self) -> bool: + """A super table is the intermediate parent of a nested table as in [a.b.c]. + If true, it won't appear in the TOML representation.""" + if self._is_super_table is not None: + return self._is_super_table + # If the table has only one child and that child is a table, then it is a super table. + if len(self) != 1: + return False + only_child = next(iter(self.values())) + return isinstance(only_child, (Table, AoT)) + + def as_string(self) -> str: + return self._value.as_string() + + # Helpers + + def indent(self, indent: int) -> "Table": + """Indent the table with given number of spaces.""" + super().indent(indent) + + m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent) + if not m: + indent_str = "" + else: + indent_str = m.group(1) + + for _, item in self._value.body: + if not isinstance(item, Whitespace): + item.trivia.indent = indent_str + item.trivia.indent + + return self + + def invalidate_display_name(self): + self.display_name = None + + for child in self.values(): + if hasattr(child, "invalidate_display_name"): + child.invalidate_display_name() + + def _getstate(self, protocol: int = 3) -> tuple: + return ( + self._value, + self._trivia, + self._is_aot_element, + self._is_super_table, + self.name, + self.display_name, + ) + + +class InlineTable(AbstractTable): + """ + An inline table literal. + """ + + def __init__( + self, value: "container.Container", trivia: Trivia, new: bool = False + ) -> None: + super().__init__(value, trivia) + + self._new = new + + @property + def discriminant(self) -> int: + return 10 + + def append(self, key, _item): + """ + Appends a (key, item) to the table. 
+ """ + if not isinstance(_item, Item): + _item = item(_item, _parent=self) + + if not isinstance(_item, (Whitespace, Comment)): + if not _item.trivia.indent and len(self._value) > 0 and not self._new: + _item.trivia.indent = " " + if _item.trivia.comment: + _item.trivia.comment = "" + + self._value.append(key, _item) + + if isinstance(key, Key): + key = key.key + + if key is not None: + dict.__setitem__(self, key, _item) + + return self + + def as_string(self) -> str: + buf = "{" + for i, (k, v) in enumerate(self._value.body): + if k is None: + if i == len(self._value.body) - 1: + if self._new: + buf = buf.rstrip(", ") + else: + buf = buf.rstrip(",") + + buf += v.as_string() + + continue + + v_trivia_trail = v.trivia.trail.replace("\n", "") + buf += ( + f"{v.trivia.indent}" + f'{k.as_string() + ("." if k.is_dotted() else "")}' + f"{k.sep}" + f"{v.as_string()}" + f"{v.trivia.comment}" + f"{v_trivia_trail}" + ) + + if i != len(self._value.body) - 1: + buf += "," + if self._new: + buf += " " + + buf += "}" + + return buf + + def __setitem__(self, key: Union[Key, str], value: Any) -> None: + if hasattr(value, "trivia") and value.trivia.comment: + value.trivia.comment = "" + super().__setitem__(key, value) + + def __copy__(self) -> "InlineTable": + return type(self)(self._value.copy(), self._trivia.copy(), self._new) + + def _getstate(self, protocol: int = 3) -> tuple: + return (self._value, self._trivia) + + +class String(str, Item): + """ + A string literal. + """ + + def __new__(cls, t, value, original, trivia): + return super().__new__(cls, value) + + def __init__(self, t: StringType, _: str, original: str, trivia: Trivia) -> None: + super().__init__(trivia) + + self._t = t + self._original = original + + def unwrap(self) -> str: + return str(self) + + @property + def discriminant(self) -> int: + return 11 + + @property + def value(self) -> str: + return self + + def as_string(self) -> str: + return f"{self._t.value}{decode(self._original)}{self._t.value}" + + def __add__(self: ItemT, other: str) -> ItemT: + if not isinstance(other, str): + return NotImplemented + result = super().__add__(other) + original = self._original + getattr(other, "_original", other) + + return self._new(result, original) + + def _new(self, result: str, original: str) -> "String": + return String(self._t, result, original, self._trivia) + + def _getstate(self, protocol=3): + return self._t, str(self), self._original, self._trivia + + @classmethod + def from_raw(cls, value: str, type_=StringType.SLB, escape=True) -> "String": + value = decode(value) + + invalid = type_.invalid_sequences + if any(c in value for c in invalid): + raise InvalidStringError(value, invalid, type_.value) + + escaped = type_.escaped_sequences + string_value = escape_string(value, escaped) if escape and escaped else value + + return cls(type_, decode(value), string_value, Trivia()) + + +class AoT(Item, _CustomList): + """ + An array of table literal + """ + + def __init__( + self, body: List[Table], name: Optional[str] = None, parsed: bool = False + ) -> None: + self.name = name + self._body: List[Table] = [] + self._parsed = parsed + + super().__init__(Trivia(trail="")) + + for table in body: + self.append(table) + + def unwrap(self) -> List[Dict[str, Any]]: + unwrapped = [] + for t in self._body: + if isinstance(t, Item): + unwrapped.append(t.unwrap()) + else: + unwrapped.append(t) + return unwrapped + + @property + def body(self) -> List[Table]: + return self._body + + @property + def discriminant(self) -> int: + return 12 + + @property + 
def value(self) -> List[Dict[Any, Any]]:
+ return [v.value for v in self._body]
+
+ def __len__(self) -> int:
+ return len(self._body)
+
+ @overload
+ def __getitem__(self, key: slice) -> List[Table]:
+ ...
+
+ @overload
+ def __getitem__(self, key: int) -> Table:
+ ...
+
+ def __getitem__(self, key):
+ return self._body[key]
+
+ def __setitem__(self, key: Union[slice, int], value: Any) -> None:
+ raise NotImplementedError
+
+ def __delitem__(self, key: Union[slice, int]) -> None:
+ del self._body[key]
+ list.__delitem__(self, key)
+
+ def insert(self, index: int, value: dict) -> None:
+ value = item(value, _parent=self)
+ if not isinstance(value, Table):
+ raise ValueError(f"Unsupported insert value type: {type(value)}")
+ length = len(self)
+ if index < 0:
+ index += length
+ if index < 0:
+ index = 0
+ elif index >= length:
+ index = length
+ m = re.match("(?s)^[^ ]*([ ]+).*$", self._trivia.indent)
+ if m:
+ indent = m.group(1)
+
+ m = re.match("(?s)^([^ ]*)(.*)$", value.trivia.indent)
+ if not m:
+ value.trivia.indent = indent
+ else:
+ value.trivia.indent = m.group(1) + indent + m.group(2)
+ prev_table = self._body[index - 1] if 0 < index and length else None
+ next_table = self._body[index + 1] if index < length - 1 else None
+ if not self._parsed:
+ if prev_table and "\n" not in value.trivia.indent:
+ value.trivia.indent = "\n" + value.trivia.indent
+ if next_table and "\n" not in next_table.trivia.indent:
+ next_table.trivia.indent = "\n" + next_table.trivia.indent
+ self._body.insert(index, value)
+ list.insert(self, index, value)
+
+ def invalidate_display_name(self):
+ """Call ``invalidate_display_name`` on the contained tables"""
+ for child in self:
+ if hasattr(child, "invalidate_display_name"):
+ child.invalidate_display_name()
+
+ def as_string(self) -> str:
+ b = ""
+ for table in self._body:
+ b += table.as_string()
+
+ return b
+
+ def __repr__(self) -> str:
+ return f"<AoT {self.value}>"
+
+ def _getstate(self, protocol=3):
+ return self._body, self.name, self._parsed
+
+
+class Null(Item):
+ """
+ A null item.
+ """ + + def __init__(self) -> None: + pass + + def unwrap(self) -> None: + return None + + @property + def discriminant(self) -> int: + return -1 + + @property + def value(self) -> None: + return None + + def as_string(self) -> str: + return "" + + def _getstate(self, protocol=3) -> tuple: + return () diff --git a/src/poetry/core/_vendor/tomlkit/parser.py b/src/poetry/core/_vendor/tomlkit/parser.py new file mode 100644 index 0000000..c6393a5 --- /dev/null +++ b/src/poetry/core/_vendor/tomlkit/parser.py @@ -0,0 +1,1138 @@ +import datetime +import re +import string + +from typing import List +from typing import Optional +from typing import Tuple +from typing import Type +from typing import Union + +from tomlkit._compat import decode +from tomlkit._utils import RFC_3339_LOOSE +from tomlkit._utils import _escaped +from tomlkit._utils import parse_rfc3339 +from tomlkit.container import Container +from tomlkit.exceptions import EmptyKeyError +from tomlkit.exceptions import EmptyTableNameError +from tomlkit.exceptions import InternalParserError +from tomlkit.exceptions import InvalidCharInStringError +from tomlkit.exceptions import InvalidControlChar +from tomlkit.exceptions import InvalidDateError +from tomlkit.exceptions import InvalidDateTimeError +from tomlkit.exceptions import InvalidNumberError +from tomlkit.exceptions import InvalidTimeError +from tomlkit.exceptions import InvalidUnicodeValueError +from tomlkit.exceptions import ParseError +from tomlkit.exceptions import UnexpectedCharError +from tomlkit.exceptions import UnexpectedEofError +from tomlkit.items import AoT +from tomlkit.items import Array +from tomlkit.items import Bool +from tomlkit.items import BoolType +from tomlkit.items import Comment +from tomlkit.items import Date +from tomlkit.items import DateTime +from tomlkit.items import Float +from tomlkit.items import InlineTable +from tomlkit.items import Integer +from tomlkit.items import Item +from tomlkit.items import Key +from tomlkit.items import KeyType +from tomlkit.items import Null +from tomlkit.items import SingleKey +from tomlkit.items import String +from tomlkit.items import StringType +from tomlkit.items import Table +from tomlkit.items import Time +from tomlkit.items import Trivia +from tomlkit.items import Whitespace +from tomlkit.source import Source +from tomlkit.toml_char import TOMLChar +from tomlkit.toml_document import TOMLDocument + + +CTRL_I = 0x09 # Tab +CTRL_J = 0x0A # Line feed +CTRL_M = 0x0D # Carriage return +CTRL_CHAR_LIMIT = 0x1F +CHR_DEL = 0x7F + + +class Parser: + """ + Parser for TOML documents. + """ + + def __init__(self, string: str) -> None: + # Input to parse + self._src = Source(decode(string)) + + self._aot_stack: List[Key] = [] + + @property + def _state(self): + return self._src.state + + @property + def _idx(self): + return self._src.idx + + @property + def _current(self): + return self._src.current + + @property + def _marker(self): + return self._src.marker + + def extract(self) -> str: + """ + Extracts the value between marker and index + """ + return self._src.extract() + + def inc(self, exception: Optional[Type[ParseError]] = None) -> bool: + """ + Increments the parser if the end of the input has not been reached. + Returns whether or not it was able to advance. + """ + return self._src.inc(exception=exception) + + def inc_n(self, n: int, exception: Optional[Type[ParseError]] = None) -> bool: + """ + Increments the parser by n characters + if the end of the input has not been reached. 
+ """ + return self._src.inc_n(n=n, exception=exception) + + def consume(self, chars, min=0, max=-1): + """ + Consume chars until min/max is satisfied is valid. + """ + return self._src.consume(chars=chars, min=min, max=max) + + def end(self) -> bool: + """ + Returns True if the parser has reached the end of the input. + """ + return self._src.end() + + def mark(self) -> None: + """ + Sets the marker to the index's current position + """ + self._src.mark() + + def parse_error(self, exception=ParseError, *args, **kwargs): + """ + Creates a generic "parse error" at the current position. + """ + return self._src.parse_error(exception, *args, **kwargs) + + def parse(self) -> TOMLDocument: + body = TOMLDocument(True) + + # Take all keyvals outside of tables/AoT's. + while not self.end(): + # Break out if a table is found + if self._current == "[": + break + + # Otherwise, take and append one KV + item = self._parse_item() + if not item: + break + + key, value = item + if (key is not None and key.is_multi()) or not self._merge_ws(value, body): + # We actually have a table + try: + body.append(key, value) + except Exception as e: + raise self.parse_error(ParseError, str(e)) from e + + self.mark() + + while not self.end(): + key, value = self._parse_table() + if isinstance(value, Table) and value.is_aot_element(): + # This is just the first table in an AoT. Parse the rest of the array + # along with it. + value = self._parse_aot(value, key) + + try: + body.append(key, value) + except Exception as e: + raise self.parse_error(ParseError, str(e)) from e + + body.parsing(False) + + return body + + def _merge_ws(self, item: Item, container: Container) -> bool: + """ + Merges the given Item with the last one currently in the given Container if + both are whitespace items. + + Returns True if the items were merged. + """ + last = container.last_item() + if not last: + return False + + if not isinstance(item, Whitespace) or not isinstance(last, Whitespace): + return False + + start = self._idx - (len(last.s) + len(item.s)) + container.body[-1] = ( + container.body[-1][0], + Whitespace(self._src[start : self._idx]), + ) + + return True + + def _is_child(self, parent: Key, child: Key) -> bool: + """ + Returns whether a key is strictly a child of another key. + AoT siblings are not considered children of one another. + """ + parent_parts = tuple(parent) + child_parts = tuple(child) + + if parent_parts == child_parts: + return False + + return parent_parts == child_parts[: len(parent_parts)] + + def _parse_item(self) -> Optional[Tuple[Optional[Key], Item]]: + """ + Attempts to parse the next item and returns it, along with its key + if the item is value-like. + """ + self.mark() + with self._state as state: + while True: + c = self._current + if c == "\n": + # Found a newline; Return all whitespace found up to this point. + self.inc() + + return None, Whitespace(self.extract()) + elif c in " \t\r": + # Skip whitespace. + if not self.inc(): + return None, Whitespace(self.extract()) + elif c == "#": + # Found a comment, parse it + indent = self.extract() + cws, comment, trail = self._parse_comment_trail() + + return None, Comment(Trivia(indent, cws, comment, trail)) + elif c == "[": + # Found a table, delegate to the calling function. + return + else: + # Beginning of a KV pair. + # Return to beginning of whitespace so it gets included + # as indentation for the KV about to be parsed. 
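+ # Editor's note (illustrative sketch, not upstream code): for input such as
+ #
+ #     "\n  # top comment\n  name = \"tomlkit\"\n"
+ #
+ # successive _parse_item() calls return roughly
+ #     (None, Whitespace("\n"))           for the blank line,
+ #     (None, Comment(...))               for the comment, with its leading
+ #                                        spaces kept as the comment's indent,
+ #     (SingleKey("name"), String(...))   for the key/value pair, with its
+ #                                        leading spaces stored in val.trivia.indent.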
+ state.restore = True + break + + return self._parse_key_value(True) + + def _parse_comment_trail(self, parse_trail: bool = True) -> Tuple[str, str, str]: + """ + Returns (comment_ws, comment, trail) + If there is no comment, comment_ws and comment will + simply be empty. + """ + if self.end(): + return "", "", "" + + comment = "" + comment_ws = "" + self.mark() + + while True: + c = self._current + + if c == "\n": + break + elif c == "#": + comment_ws = self.extract() + + self.mark() + self.inc() # Skip # + + # The comment itself + while not self.end() and not self._current.is_nl(): + code = ord(self._current) + if code == CHR_DEL or code <= CTRL_CHAR_LIMIT and code != CTRL_I: + raise self.parse_error(InvalidControlChar, code, "comments") + + if not self.inc(): + break + + comment = self.extract() + self.mark() + + break + elif c in " \t\r": + self.inc() + else: + raise self.parse_error(UnexpectedCharError, c) + + if self.end(): + break + + trail = "" + if parse_trail: + while self._current.is_spaces() and self.inc(): + pass + + if self._current == "\r": + self.inc() + + if self._current == "\n": + self.inc() + + if self._idx != self._marker or self._current.is_ws(): + trail = self.extract() + + return comment_ws, comment, trail + + def _parse_key_value(self, parse_comment: bool = False) -> Tuple[Key, Item]: + # Leading indent + self.mark() + + while self._current.is_spaces() and self.inc(): + pass + + indent = self.extract() + + # Key + key = self._parse_key() + + self.mark() + + found_equals = self._current == "=" + while self._current.is_kv_sep() and self.inc(): + if self._current == "=": + if found_equals: + raise self.parse_error(UnexpectedCharError, "=") + else: + found_equals = True + if not found_equals: + raise self.parse_error(UnexpectedCharError, self._current) + + if not key.sep: + key.sep = self.extract() + else: + key.sep += self.extract() + + # Value + val = self._parse_value() + # Comment + if parse_comment: + cws, comment, trail = self._parse_comment_trail() + meta = val.trivia + if not meta.comment_ws: + meta.comment_ws = cws + + meta.comment = comment + meta.trail = trail + else: + val.trivia.trail = "" + + val.trivia.indent = indent + + return key, val + + def _parse_key(self) -> Key: + """ + Parses a Key at the current position; + WS before the key must be exhausted first at the callsite. + """ + self.mark() + while self._current.is_spaces() and self.inc(): + # Skip any leading whitespace + pass + if self._current in "\"'": + return self._parse_quoted_key() + else: + return self._parse_bare_key() + + def _parse_quoted_key(self) -> Key: + """ + Parses a key enclosed in either single or double quotes. + """ + # Extract the leading whitespace + original = self.extract() + quote_style = self._current + key_type = next((t for t in KeyType if t.value == quote_style), None) + + if key_type is None: + raise RuntimeError("Should not have entered _parse_quoted_key()") + + key_str = self._parse_string( + StringType.SLB if key_type == KeyType.Basic else StringType.SLL + ) + if key_str._t.is_multiline(): + raise self.parse_error(UnexpectedCharError, key_str._t.value) + original += key_str.as_string() + self.mark() + while self._current.is_spaces() and self.inc(): + pass + original += self.extract() + key = SingleKey(str(key_str), t=key_type, sep="", original=original) + if self._current == ".": + self.inc() + key = key.concat(self._parse_key()) + + return key + + def _parse_bare_key(self) -> Key: + """ + Parses a bare key. 
+ """ + while ( + self._current.is_bare_key_char() or self._current.is_spaces() + ) and self.inc(): + pass + + original = self.extract() + key = original.strip() + if not key: + # Empty key + raise self.parse_error(EmptyKeyError) + + if " " in key: + # Bare key with spaces in it + raise self.parse_error(ParseError, f'Invalid key "{key}"') + + key = SingleKey(key, KeyType.Bare, "", original) + + if self._current == ".": + self.inc() + key = key.concat(self._parse_key()) + + return key + + def _parse_value(self) -> Item: + """ + Attempts to parse a value at the current position. + """ + self.mark() + c = self._current + trivia = Trivia() + + if c == StringType.SLB.value: + return self._parse_basic_string() + elif c == StringType.SLL.value: + return self._parse_literal_string() + elif c == BoolType.TRUE.value[0]: + return self._parse_true() + elif c == BoolType.FALSE.value[0]: + return self._parse_false() + elif c == "[": + return self._parse_array() + elif c == "{": + return self._parse_inline_table() + elif c in "+-" or self._peek(4) in { + "+inf", + "-inf", + "inf", + "+nan", + "-nan", + "nan", + }: + # Number + while self._current not in " \t\n\r#,]}" and self.inc(): + pass + + raw = self.extract() + + item = self._parse_number(raw, trivia) + if item is not None: + return item + + raise self.parse_error(InvalidNumberError) + elif c in string.digits: + # Integer, Float, Date, Time or DateTime + while self._current not in " \t\n\r#,]}" and self.inc(): + pass + + raw = self.extract() + + m = RFC_3339_LOOSE.match(raw) + if m: + if m.group(1) and m.group(5): + # datetime + try: + dt = parse_rfc3339(raw) + assert isinstance(dt, datetime.datetime) + return DateTime( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + dt.tzinfo, + trivia, + raw, + ) + except ValueError: + raise self.parse_error(InvalidDateTimeError) + + if m.group(1): + try: + dt = parse_rfc3339(raw) + assert isinstance(dt, datetime.date) + date = Date(dt.year, dt.month, dt.day, trivia, raw) + self.mark() + while self._current not in "\t\n\r#,]}" and self.inc(): + pass + + time_raw = self.extract() + if not time_raw.strip(): + trivia.comment_ws = time_raw + return date + + dt = parse_rfc3339(raw + time_raw) + assert isinstance(dt, datetime.datetime) + return DateTime( + dt.year, + dt.month, + dt.day, + dt.hour, + dt.minute, + dt.second, + dt.microsecond, + dt.tzinfo, + trivia, + raw + time_raw, + ) + except ValueError: + raise self.parse_error(InvalidDateError) + + if m.group(5): + try: + t = parse_rfc3339(raw) + assert isinstance(t, datetime.time) + return Time( + t.hour, + t.minute, + t.second, + t.microsecond, + t.tzinfo, + trivia, + raw, + ) + except ValueError: + raise self.parse_error(InvalidTimeError) + + item = self._parse_number(raw, trivia) + if item is not None: + return item + + raise self.parse_error(InvalidNumberError) + else: + raise self.parse_error(UnexpectedCharError, c) + + def _parse_true(self): + return self._parse_bool(BoolType.TRUE) + + def _parse_false(self): + return self._parse_bool(BoolType.FALSE) + + def _parse_bool(self, style: BoolType) -> Bool: + with self._state: + style = BoolType(style) + + # only keep parsing for bool if the characters match the style + # try consuming rest of chars in style + for c in style: + self.consume(c, min=1, max=1) + + return Bool(style, Trivia()) + + def _parse_array(self) -> Array: + # Consume opening bracket, EOF here is an issue (middle of array) + self.inc(exception=UnexpectedEofError) + + elems: List[Item] = [] + prev_value = 
None + while True: + # consume whitespace + mark = self._idx + self.consume(TOMLChar.SPACES + TOMLChar.NL) + indent = self._src[mark : self._idx] + newline = set(TOMLChar.NL) & set(indent) + if newline: + elems.append(Whitespace(indent)) + continue + + # consume comment + if self._current == "#": + cws, comment, trail = self._parse_comment_trail(parse_trail=False) + elems.append(Comment(Trivia(indent, cws, comment, trail))) + continue + + # consume indent + if indent: + elems.append(Whitespace(indent)) + continue + + # consume value + if not prev_value: + try: + elems.append(self._parse_value()) + prev_value = True + continue + except UnexpectedCharError: + pass + + # consume comma + if prev_value and self._current == ",": + self.inc(exception=UnexpectedEofError) + elems.append(Whitespace(",")) + prev_value = False + continue + + # consume closing bracket + if self._current == "]": + # consume closing bracket, EOF here doesn't matter + self.inc() + break + + raise self.parse_error(UnexpectedCharError, self._current) + + try: + res = Array(elems, Trivia()) + except ValueError: + pass + else: + return res + + def _parse_inline_table(self) -> InlineTable: + # consume opening bracket, EOF here is an issue (middle of array) + self.inc(exception=UnexpectedEofError) + + elems = Container(True) + trailing_comma = None + while True: + # consume leading whitespace + mark = self._idx + self.consume(TOMLChar.SPACES) + raw = self._src[mark : self._idx] + if raw: + elems.add(Whitespace(raw)) + + if not trailing_comma: + # None: empty inline table + # False: previous key-value pair was not followed by a comma + if self._current == "}": + # consume closing bracket, EOF here doesn't matter + self.inc() + break + + if ( + trailing_comma is False + or trailing_comma is None + and self._current == "," + ): + # Either the previous key-value pair was not followed by a comma + # or the table has an unexpected leading comma. 
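+ # Editor's note (illustrative): together with the else-branch below, this
+ # trailing_comma bookkeeping accepts   {}   and   {a = 1, b = 2}
+ # but rejects   {, a = 1}   (leading comma) and   {a = 1,}   (trailing comma),
+ # matching TOML's inline-table rules.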
+ raise self.parse_error(UnexpectedCharError, self._current) + else: + # True: previous key-value pair was followed by a comma + if self._current == "}" or self._current == ",": + raise self.parse_error(UnexpectedCharError, self._current) + + key, val = self._parse_key_value(False) + elems.add(key, val) + + # consume trailing whitespace + mark = self._idx + self.consume(TOMLChar.SPACES) + raw = self._src[mark : self._idx] + if raw: + elems.add(Whitespace(raw)) + + # consume trailing comma + trailing_comma = self._current == "," + if trailing_comma: + # consume closing bracket, EOF here is an issue (middle of inline table) + self.inc(exception=UnexpectedEofError) + + return InlineTable(elems, Trivia()) + + def _parse_number(self, raw: str, trivia: Trivia) -> Optional[Item]: + # Leading zeros are not allowed + sign = "" + if raw.startswith(("+", "-")): + sign = raw[0] + raw = raw[1:] + + if len(raw) > 1 and ( + raw.startswith("0") + and not raw.startswith(("0.", "0o", "0x", "0b", "0e")) + or sign + and raw.startswith(".") + ): + return None + + if raw.startswith(("0o", "0x", "0b")) and sign: + return None + + digits = "[0-9]" + base = 10 + if raw.startswith("0b"): + digits = "[01]" + base = 2 + elif raw.startswith("0o"): + digits = "[0-7]" + base = 8 + elif raw.startswith("0x"): + digits = "[0-9a-f]" + base = 16 + + # Underscores should be surrounded by digits + clean = re.sub(f"(?i)(?<={digits})_(?={digits})", "", raw).lower() + + if "_" in clean: + return None + + if ( + clean.endswith(".") + or not clean.startswith("0x") + and clean.split("e", 1)[0].endswith(".") + ): + return None + + try: + return Integer(int(sign + clean, base), trivia, sign + raw) + except ValueError: + try: + return Float(float(sign + clean), trivia, sign + raw) + except ValueError: + return None + + def _parse_literal_string(self) -> String: + with self._state: + return self._parse_string(StringType.SLL) + + def _parse_basic_string(self) -> String: + with self._state: + return self._parse_string(StringType.SLB) + + def _parse_escaped_char(self, multiline): + if multiline and self._current.is_ws(): + # When the last non-whitespace character on a line is + # a \, it will be trimmed along with all whitespace + # (including newlines) up to the next non-whitespace + # character or closing delimiter. 
+ # """\ + # hello \ + # world""" + tmp = "" + while self._current.is_ws(): + tmp += self._current + # consume the whitespace, EOF here is an issue + # (middle of string) + self.inc(exception=UnexpectedEofError) + continue + + # the escape followed by whitespace must have a newline + # before any other chars + if "\n" not in tmp: + raise self.parse_error(InvalidCharInStringError, self._current) + + return "" + + if self._current in _escaped: + c = _escaped[self._current] + + # consume this char, EOF here is an issue (middle of string) + self.inc(exception=UnexpectedEofError) + + return c + + if self._current in {"u", "U"}: + # this needs to be a unicode + u, ue = self._peek_unicode(self._current == "U") + if u is not None: + # consume the U char and the unicode value + self.inc_n(len(ue) + 1) + + return u + + raise self.parse_error(InvalidUnicodeValueError) + + raise self.parse_error(InvalidCharInStringError, self._current) + + def _parse_string(self, delim: StringType) -> String: + # only keep parsing for string if the current character matches the delim + if self._current != delim.unit: + raise self.parse_error( + InternalParserError, + f"Invalid character for string type {delim}", + ) + + # consume the opening/first delim, EOF here is an issue + # (middle of string or middle of delim) + self.inc(exception=UnexpectedEofError) + + if self._current == delim.unit: + # consume the closing/second delim, we do not care if EOF occurs as + # that would simply imply an empty single line string + if not self.inc() or self._current != delim.unit: + # Empty string + return String(delim, "", "", Trivia()) + + # consume the third delim, EOF here is an issue (middle of string) + self.inc(exception=UnexpectedEofError) + + delim = delim.toggle() # convert delim to multi delim + + self.mark() # to extract the original string with whitespace and all + value = "" + + # A newline immediately following the opening delimiter will be trimmed. + if delim.is_multiline() and self._current == "\n": + # consume the newline, EOF here is an issue (middle of string) + self.inc(exception=UnexpectedEofError) + + escaped = False # whether the previous key was ESCAPE + while True: + code = ord(self._current) + if ( + delim.is_singleline() + and not escaped + and (code == CHR_DEL or code <= CTRL_CHAR_LIMIT and code != CTRL_I) + ) or ( + delim.is_multiline() + and not escaped + and ( + code == CHR_DEL + or code <= CTRL_CHAR_LIMIT + and code not in [CTRL_I, CTRL_J, CTRL_M] + ) + ): + raise self.parse_error(InvalidControlChar, code, "strings") + elif not escaped and self._current == delim.unit: + # try to process current as a closing delim + original = self.extract() + + close = "" + if delim.is_multiline(): + # Consume the delimiters to see if we are at the end of the string + close = "" + while self._current == delim.unit: + close += self._current + self.inc() + + if len(close) < 3: + # Not a triple quote, leave in result as-is. 
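+ # Editor's note (illustrative): `close` is the run of quotes just consumed
+ # inside a multiline string. This branch (len < 3) keeps them as literal
+ # text; below, len == 3 ends the string, len 4-5 puts the surplus 1-2 quotes
+ # into the value before closing, and len >= 6 is rejected as invalid.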
+ # Adding back the characters we already consumed + value += close + continue + + if len(close) == 3: + # We are at the end of the string + return String(delim, value, original, Trivia()) + + if len(close) >= 6: + raise self.parse_error(InvalidCharInStringError, self._current) + + value += close[:-3] + original += close[:-3] + + return String(delim, value, original, Trivia()) + else: + # consume the closing delim, we do not care if EOF occurs as + # that would simply imply the end of self._src + self.inc() + + return String(delim, value, original, Trivia()) + elif delim.is_basic() and escaped: + # attempt to parse the current char as an escaped value, an exception + # is raised if this fails + value += self._parse_escaped_char(delim.is_multiline()) + + # no longer escaped + escaped = False + elif delim.is_basic() and self._current == "\\": + # the next char is being escaped + escaped = True + + # consume this char, EOF here is an issue (middle of string) + self.inc(exception=UnexpectedEofError) + else: + # this is either a literal string where we keep everything as is, + # or this is not a special escaped char in a basic string + value += self._current + + # consume this char, EOF here is an issue (middle of string) + self.inc(exception=UnexpectedEofError) + + def _parse_table( + self, parent_name: Optional[Key] = None, parent: Optional[Table] = None + ) -> Tuple[Key, Union[Table, AoT]]: + """ + Parses a table element. + """ + if self._current != "[": + raise self.parse_error( + InternalParserError, "_parse_table() called on non-bracket character." + ) + + indent = self.extract() + self.inc() # Skip opening bracket + + if self.end(): + raise self.parse_error(UnexpectedEofError) + + is_aot = False + if self._current == "[": + if not self.inc(): + raise self.parse_error(UnexpectedEofError) + + is_aot = True + try: + key = self._parse_key() + except EmptyKeyError: + raise self.parse_error(EmptyTableNameError) from None + if self.end(): + raise self.parse_error(UnexpectedEofError) + elif self._current != "]": + raise self.parse_error(UnexpectedCharError, self._current) + elif not key.key.strip(): + raise self.parse_error(EmptyTableNameError) + + key.sep = "" + full_key = key + name_parts = tuple(key) + if any(" " in part.key.strip() and part.is_bare() for part in name_parts): + raise self.parse_error( + ParseError, f'Invalid table name "{full_key.as_string()}"' + ) + + missing_table = False + if parent_name: + parent_name_parts = tuple(parent_name) + else: + parent_name_parts = () + + if len(name_parts) > len(parent_name_parts) + 1: + missing_table = True + + name_parts = name_parts[len(parent_name_parts) :] + + values = Container(True) + + self.inc() # Skip closing bracket + if is_aot: + # TODO: Verify close bracket + self.inc() + + cws, comment, trail = self._parse_comment_trail() + + result = Null() + table = Table( + values, + Trivia(indent, cws, comment, trail), + is_aot, + name=name_parts[0].key if name_parts else key.key, + display_name=full_key.as_string(), + is_super_table=False, + ) + + if len(name_parts) > 1: + if missing_table: + # Missing super table + # i.e. 
a table initialized like this: [foo.bar] + # without initializing [foo] + # + # So we have to create the parent tables + table = Table( + Container(True), + Trivia(indent, cws, comment, trail), + is_aot and name_parts[0] in self._aot_stack, + is_super_table=True, + name=name_parts[0].key, + ) + + result = table + key = name_parts[0] + + for i, _name in enumerate(name_parts[1:]): + child = table.get( + _name, + Table( + Container(True), + Trivia(indent, cws, comment, trail), + is_aot and i == len(name_parts) - 2, + is_super_table=i < len(name_parts) - 2, + name=_name.key, + display_name=full_key.as_string() + if i == len(name_parts) - 2 + else None, + ), + ) + + if is_aot and i == len(name_parts) - 2: + table.raw_append(_name, AoT([child], name=table.name, parsed=True)) + else: + table.raw_append(_name, child) + + table = child + values = table.value + else: + if name_parts: + key = name_parts[0] + + while not self.end(): + item = self._parse_item() + if item: + _key, item = item + if not self._merge_ws(item, values): + table.raw_append(_key, item) + else: + if self._current == "[": + _, key_next = self._peek_table() + + if self._is_child(full_key, key_next): + key_next, table_next = self._parse_table(full_key, table) + + table.raw_append(key_next, table_next) + + # Picking up any sibling + while not self.end(): + _, key_next = self._peek_table() + + if not self._is_child(full_key, key_next): + break + + key_next, table_next = self._parse_table(full_key, table) + + table.raw_append(key_next, table_next) + + break + else: + raise self.parse_error( + InternalParserError, + "_parse_item() returned None on a non-bracket character.", + ) + + if isinstance(result, Null): + result = table + + if is_aot and (not self._aot_stack or full_key != self._aot_stack[-1]): + result = self._parse_aot(result, full_key) + + return key, result + + def _peek_table(self) -> Tuple[bool, Key]: + """ + Peeks ahead non-intrusively by cloning then restoring the + initial state of the parser. + + Returns the name of the table about to be parsed, + as well as whether it is part of an AoT. + """ + # we always want to restore after exiting this scope + with self._state(save_marker=True, restore=True): + if self._current != "[": + raise self.parse_error( + InternalParserError, + "_peek_table() entered on non-bracket character", + ) + + # AoT + self.inc() + is_aot = False + if self._current == "[": + self.inc() + is_aot = True + try: + return is_aot, self._parse_key() + except EmptyKeyError: + raise self.parse_error(EmptyTableNameError) from None + + def _parse_aot(self, first: Table, name_first: Key) -> AoT: + """ + Parses all siblings of the provided table first and bundles them into + an AoT. + """ + payload = [first] + self._aot_stack.append(name_first) + while not self.end(): + is_aot_next, name_next = self._peek_table() + if is_aot_next and name_next == name_first: + _, table = self._parse_table(name_first) + payload.append(table) + else: + break + + self._aot_stack.pop() + + return AoT(payload, parsed=True) + + def _peek(self, n: int) -> str: + """ + Peeks ahead n characters. + + n is the max number of characters that will be peeked. 
+ """ + # we always want to restore after exiting this scope + with self._state(restore=True): + buf = "" + for _ in range(n): + if self._current not in " \t\n\r#,]}" + self._src.EOF: + buf += self._current + self.inc() + continue + + break + return buf + + def _peek_unicode(self, is_long: bool) -> Tuple[Optional[str], Optional[str]]: + """ + Peeks ahead non-intrusively by cloning then restoring the + initial state of the parser. + + Returns the unicode value is it's a valid one else None. + """ + # we always want to restore after exiting this scope + with self._state(save_marker=True, restore=True): + if self._current not in {"u", "U"}: + raise self.parse_error( + InternalParserError, "_peek_unicode() entered on non-unicode value" + ) + + self.inc() # Dropping prefix + self.mark() + + if is_long: + chars = 8 + else: + chars = 4 + + if not self.inc_n(chars): + value, extracted = None, None + else: + extracted = self.extract() + + if extracted[0].lower() == "d" and extracted[1].strip("01234567"): + return None, None + + try: + value = chr(int(extracted, 16)) + except (ValueError, OverflowError): + value = None + + return value, extracted diff --git a/src/poetry/core/_vendor/tomlkit/py.typed b/src/poetry/core/_vendor/tomlkit/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/_vendor/tomlkit/source.py b/src/poetry/core/_vendor/tomlkit/source.py new file mode 100644 index 0000000..d1a53cd --- /dev/null +++ b/src/poetry/core/_vendor/tomlkit/source.py @@ -0,0 +1,181 @@ +from copy import copy +from typing import Any +from typing import Optional +from typing import Tuple +from typing import Type + +from tomlkit.exceptions import ParseError +from tomlkit.exceptions import UnexpectedCharError +from tomlkit.toml_char import TOMLChar + + +class _State: + def __init__( + self, + source: "Source", + save_marker: Optional[bool] = False, + restore: Optional[bool] = False, + ) -> None: + self._source = source + self._save_marker = save_marker + self.restore = restore + + def __enter__(self) -> "_State": + # Entering this context manager - save the state + self._chars = copy(self._source._chars) + self._idx = self._source._idx + self._current = self._source._current + self._marker = self._source._marker + + return self + + def __exit__(self, exception_type, exception_val, trace): + # Exiting this context manager - restore the prior state + if self.restore or exception_type: + self._source._chars = self._chars + self._source._idx = self._idx + self._source._current = self._current + if self._save_marker: + self._source._marker = self._marker + + +class _StateHandler: + """ + State preserver for the Parser. 
+ """ + + def __init__(self, source: "Source") -> None: + self._source = source + self._states = [] + + def __call__(self, *args, **kwargs): + return _State(self._source, *args, **kwargs) + + def __enter__(self) -> None: + state = self() + self._states.append(state) + return state.__enter__() + + def __exit__(self, exception_type, exception_val, trace): + state = self._states.pop() + return state.__exit__(exception_type, exception_val, trace) + + +class Source(str): + EOF = TOMLChar("\0") + + def __init__(self, _: str) -> None: + super().__init__() + + # Collection of TOMLChars + self._chars = iter([(i, TOMLChar(c)) for i, c in enumerate(self)]) + + self._idx = 0 + self._marker = 0 + self._current = TOMLChar("") + + self._state = _StateHandler(self) + + self.inc() + + def reset(self): + # initialize both idx and current + self.inc() + + # reset marker + self.mark() + + @property + def state(self) -> _StateHandler: + return self._state + + @property + def idx(self) -> int: + return self._idx + + @property + def current(self) -> TOMLChar: + return self._current + + @property + def marker(self) -> int: + return self._marker + + def extract(self) -> str: + """ + Extracts the value between marker and index + """ + return self[self._marker : self._idx] + + def inc(self, exception: Optional[Type[ParseError]] = None) -> bool: + """ + Increments the parser if the end of the input has not been reached. + Returns whether or not it was able to advance. + """ + try: + self._idx, self._current = next(self._chars) + + return True + except StopIteration: + self._idx = len(self) + self._current = self.EOF + if exception: + raise self.parse_error(exception) + + return False + + def inc_n(self, n: int, exception: Optional[Type[ParseError]] = None) -> bool: + """ + Increments the parser by n characters + if the end of the input has not been reached. + """ + return all(self.inc(exception=exception) for _ in range(n)) + + def consume(self, chars, min=0, max=-1): + """ + Consume chars until min/max is satisfied is valid. + """ + while self.current in chars and max != 0: + min -= 1 + max -= 1 + if not self.inc(): + break + + # failed to consume minimum number of characters + if min > 0: + raise self.parse_error(UnexpectedCharError, self.current) + + def end(self) -> bool: + """ + Returns True if the parser has reached the end of the input. + """ + return self._current is self.EOF + + def mark(self) -> None: + """ + Sets the marker to the index's current position + """ + self._marker = self._idx + + def parse_error( + self, + exception: Type[ParseError] = ParseError, + *args: Any, + **kwargs: Any, + ) -> ParseError: + """ + Creates a generic "parse error" at the current position. 
+ """ + line, col = self._to_linecol() + + return exception(line, col, *args, **kwargs) + + def _to_linecol(self) -> Tuple[int, int]: + cur = 0 + for i, line in enumerate(self.splitlines()): + if cur + len(line) + 1 > self.idx: + return (i + 1, self.idx - cur) + + cur += len(line) + 1 + + return len(self.splitlines()), 0 diff --git a/src/poetry/core/_vendor/tomlkit/toml_char.py b/src/poetry/core/_vendor/tomlkit/toml_char.py new file mode 100644 index 0000000..b4bb411 --- /dev/null +++ b/src/poetry/core/_vendor/tomlkit/toml_char.py @@ -0,0 +1,52 @@ +import string + + +class TOMLChar(str): + def __init__(self, c): + super().__init__() + + if len(self) > 1: + raise ValueError("A TOML character must be of length 1") + + BARE = string.ascii_letters + string.digits + "-_" + KV = "= \t" + NUMBER = string.digits + "+-_.e" + SPACES = " \t" + NL = "\n\r" + WS = SPACES + NL + + def is_bare_key_char(self) -> bool: + """ + Whether the character is a valid bare key name or not. + """ + return self in self.BARE + + def is_kv_sep(self) -> bool: + """ + Whether the character is a valid key/value separator or not. + """ + return self in self.KV + + def is_int_float_char(self) -> bool: + """ + Whether the character if a valid integer or float value character or not. + """ + return self in self.NUMBER + + def is_ws(self) -> bool: + """ + Whether the character is a whitespace character or not. + """ + return self in self.WS + + def is_nl(self) -> bool: + """ + Whether the character is a new line character or not. + """ + return self in self.NL + + def is_spaces(self) -> bool: + """ + Whether the character is a space or not + """ + return self in self.SPACES diff --git a/src/poetry/core/_vendor/tomlkit/toml_document.py b/src/poetry/core/_vendor/tomlkit/toml_document.py new file mode 100644 index 0000000..71fac2e --- /dev/null +++ b/src/poetry/core/_vendor/tomlkit/toml_document.py @@ -0,0 +1,7 @@ +from tomlkit.container import Container + + +class TOMLDocument(Container): + """ + A TOML document. + """ diff --git a/src/poetry/core/_vendor/tomlkit/toml_file.py b/src/poetry/core/_vendor/tomlkit/toml_file.py new file mode 100644 index 0000000..7459130 --- /dev/null +++ b/src/poetry/core/_vendor/tomlkit/toml_file.py @@ -0,0 +1,58 @@ +import os +import re + +from typing import TYPE_CHECKING + +from tomlkit.api import loads +from tomlkit.toml_document import TOMLDocument + + +if TYPE_CHECKING: + from _typeshed import StrPath as _StrPath +else: + from typing import Union + + _StrPath = Union[str, os.PathLike] + + +class TOMLFile: + """ + Represents a TOML file. + + :param path: path to the TOML file + """ + + def __init__(self, path: _StrPath) -> None: + self._path = path + self._linesep = os.linesep + + def read(self) -> TOMLDocument: + """Read the file content as a :class:`tomlkit.toml_document.TOMLDocument`.""" + with open(self._path, encoding="utf-8", newline="") as f: + content = f.read() + + # check if consistent line endings + num_newline = content.count("\n") + if num_newline > 0: + num_win_eol = content.count("\r\n") + if num_win_eol == num_newline: + self._linesep = "\r\n" + elif num_win_eol == 0: + self._linesep = "\n" + else: + self._linesep = "mixed" + + return loads(content) + + def write(self, data: TOMLDocument) -> None: + """Write the TOMLDocument to the file.""" + content = data.as_string() + + # apply linesep + if self._linesep == "\n": + content = content.replace("\r\n", "\n") + elif self._linesep == "\r\n": + content = re.sub(r"(? 
0) and (alen >= elen - num_tv_tuples): + return + raise TypeError(f"Too {'many' if alen > elen else 'few'} parameters for {cls};" + f" actual {alen}, expected {elen}") + + +if sys.version_info >= (3, 10): + def _should_collect_from_parameters(t): + return isinstance( + t, (typing._GenericAlias, _types.GenericAlias, _types.UnionType) + ) +elif sys.version_info >= (3, 9): + def _should_collect_from_parameters(t): + return isinstance(t, (typing._GenericAlias, _types.GenericAlias)) +else: + def _should_collect_from_parameters(t): + return isinstance(t, typing._GenericAlias) and not t._special + + +def _collect_type_vars(types, typevar_types=None): + """Collect all type variable contained in types in order of + first appearance (lexicographic order). For example:: + + _collect_type_vars((T, List[S, T])) == (T, S) + """ + if typevar_types is None: + typevar_types = typing.TypeVar + tvars = [] + for t in types: + if ( + isinstance(t, typevar_types) and + t not in tvars and + not _is_unpack(t) + ): + tvars.append(t) + if _should_collect_from_parameters(t): + tvars.extend([t for t in t.__parameters__ if t not in tvars]) + return tuple(tvars) + + +NoReturn = typing.NoReturn + +# Some unconstrained type variables. These are used by the container types. +# (These are not for export.) +T = typing.TypeVar('T') # Any type. +KT = typing.TypeVar('KT') # Key type. +VT = typing.TypeVar('VT') # Value type. +T_co = typing.TypeVar('T_co', covariant=True) # Any type covariant containers. +T_contra = typing.TypeVar('T_contra', contravariant=True) # Ditto contravariant. + + +if sys.version_info >= (3, 11): + from typing import Any +else: + + class _AnyMeta(type): + def __instancecheck__(self, obj): + if self is Any: + raise TypeError("typing_extensions.Any cannot be used with isinstance()") + return super().__instancecheck__(obj) + + def __repr__(self): + if self is Any: + return "typing_extensions.Any" + return super().__repr__() + + class Any(metaclass=_AnyMeta): + """Special type indicating an unconstrained type. + - Any is compatible with every type. + - Any assumed to have all methods. + - All values assumed to be instances of Any. + Note that all the above statements are true from the point of view of + static type checkers. At runtime, Any should not be used with instance + checks. + """ + def __new__(cls, *args, **kwargs): + if cls is Any: + raise TypeError("Any cannot be instantiated") + return super().__new__(cls, *args, **kwargs) + + +ClassVar = typing.ClassVar + +# On older versions of typing there is an internal class named "Final". +# 3.8+ +if hasattr(typing, 'Final') and sys.version_info[:2] >= (3, 7): + Final = typing.Final +# 3.7 +else: + class _FinalForm(typing._SpecialForm, _root=True): + + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only a single type.') + return typing._GenericAlias(self, (item,)) + + Final = _FinalForm('Final', + doc="""A special typing construct to indicate that a name + cannot be re-assigned or overridden in a subclass. 
+ For example: + + MAX_SIZE: Final = 9000 + MAX_SIZE += 1 # Error reported by type checker + + class Connection: + TIMEOUT: Final[int] = 10 + class FastConnector(Connection): + TIMEOUT = 1 # Error reported by type checker + + There is no runtime checking of these properties.""") + +if sys.version_info >= (3, 11): + final = typing.final +else: + # @final exists in 3.8+, but we backport it for all versions + # before 3.11 to keep support for the __final__ attribute. + # See https://bugs.python.org/issue46342 + def final(f): + """This decorator can be used to indicate to type checkers that + the decorated method cannot be overridden, and decorated class + cannot be subclassed. For example: + + class Base: + @final + def done(self) -> None: + ... + class Sub(Base): + def done(self) -> None: # Error reported by type checker + ... + @final + class Leaf: + ... + class Other(Leaf): # Error reported by type checker + ... + + There is no runtime checking of these properties. The decorator + sets the ``__final__`` attribute to ``True`` on the decorated object + to allow runtime introspection. + """ + try: + f.__final__ = True + except (AttributeError, TypeError): + # Skip the attribute silently if it is not writable. + # AttributeError happens if the object has __slots__ or a + # read-only property, TypeError if it's a builtin class. + pass + return f + + +def IntVar(name): + return typing.TypeVar(name) + + +# 3.8+: +if hasattr(typing, 'Literal'): + Literal = typing.Literal +# 3.7: +else: + class _LiteralForm(typing._SpecialForm, _root=True): + + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + return typing._GenericAlias(self, parameters) + + Literal = _LiteralForm('Literal', + doc="""A type that can be used to indicate to type checkers + that the corresponding value has a value literally equivalent + to the provided parameter. For example: + + var: Literal[4] = 4 + + The type checker understands that 'var' is literally equal to + the value 4 and no other value. + + Literal[...] cannot be subclassed. There is no runtime + checking verifying that the parameter is actually a value + instead of a type.""") + + +_overload_dummy = typing._overload_dummy # noqa + + +if hasattr(typing, "get_overloads"): # 3.11+ + overload = typing.overload + get_overloads = typing.get_overloads + clear_overloads = typing.clear_overloads +else: + # {module: {qualname: {firstlineno: func}}} + _overload_registry = collections.defaultdict( + functools.partial(collections.defaultdict, dict) + ) + + def overload(func): + """Decorator for overloaded functions/methods. + + In a stub file, place two or more stub definitions for the same + function in a row, each decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + + In a non-stub file (i.e. a regular .py file), do the same but + follow it with an implementation. The implementation should *not* + be decorated with @overload. For example: + + @overload + def utf8(value: None) -> None: ... + @overload + def utf8(value: bytes) -> bytes: ... + @overload + def utf8(value: str) -> bytes: ... + def utf8(value): + # implementation goes here + + The overloads for a function can be retrieved at runtime using the + get_overloads() function. 
+ """ + # classmethod and staticmethod + f = getattr(func, "__func__", func) + try: + _overload_registry[f.__module__][f.__qualname__][ + f.__code__.co_firstlineno + ] = func + except AttributeError: + # Not a normal function; ignore. + pass + return _overload_dummy + + def get_overloads(func): + """Return all defined overloads for *func* as a sequence.""" + # classmethod and staticmethod + f = getattr(func, "__func__", func) + if f.__module__ not in _overload_registry: + return [] + mod_dict = _overload_registry[f.__module__] + if f.__qualname__ not in mod_dict: + return [] + return list(mod_dict[f.__qualname__].values()) + + def clear_overloads(): + """Clear all overloads in the registry.""" + _overload_registry.clear() + + +# This is not a real generic class. Don't use outside annotations. +Type = typing.Type + +# Various ABCs mimicking those in collections.abc. +# A few are simply re-exported for completeness. + + +Awaitable = typing.Awaitable +Coroutine = typing.Coroutine +AsyncIterable = typing.AsyncIterable +AsyncIterator = typing.AsyncIterator +Deque = typing.Deque +ContextManager = typing.ContextManager +AsyncContextManager = typing.AsyncContextManager +DefaultDict = typing.DefaultDict + +# 3.7.2+ +if hasattr(typing, 'OrderedDict'): + OrderedDict = typing.OrderedDict +# 3.7.0-3.7.2 +else: + OrderedDict = typing._alias(collections.OrderedDict, (KT, VT)) + +Counter = typing.Counter +ChainMap = typing.ChainMap +AsyncGenerator = typing.AsyncGenerator +NewType = typing.NewType +Text = typing.Text +TYPE_CHECKING = typing.TYPE_CHECKING + + +_PROTO_WHITELIST = ['Callable', 'Awaitable', + 'Iterable', 'Iterator', 'AsyncIterable', 'AsyncIterator', + 'Hashable', 'Sized', 'Container', 'Collection', 'Reversible', + 'ContextManager', 'AsyncContextManager'] + + +def _get_protocol_attrs(cls): + attrs = set() + for base in cls.__mro__[:-1]: # without object + if base.__name__ in ('Protocol', 'Generic'): + continue + annotations = getattr(base, '__annotations__', {}) + for attr in list(base.__dict__.keys()) + list(annotations.keys()): + if (not attr.startswith('_abc_') and attr not in ( + '__abstractmethods__', '__annotations__', '__weakref__', + '_is_protocol', '_is_runtime_protocol', '__dict__', + '__args__', '__slots__', + '__next_in_mro__', '__parameters__', '__origin__', + '__orig_bases__', '__extra__', '__tree_hash__', + '__doc__', '__subclasshook__', '__init__', '__new__', + '__module__', '_MutableMapping__marker', '_gorg')): + attrs.add(attr) + return attrs + + +def _is_callable_members_only(cls): + return all(callable(getattr(cls, attr, None)) for attr in _get_protocol_attrs(cls)) + + +def _maybe_adjust_parameters(cls): + """Helper function used in Protocol.__init_subclass__ and _TypedDictMeta.__new__. + + The contents of this function are very similar + to logic found in typing.Generic.__init_subclass__ + on the CPython main branch. + """ + tvars = [] + if '__orig_bases__' in cls.__dict__: + tvars = typing._collect_type_vars(cls.__orig_bases__) + # Look for Generic[T1, ..., Tn] or Protocol[T1, ..., Tn]. + # If found, tvars must be a subset of it. + # If not found, tvars is it. + # Also check for and reject plain Generic, + # and reject multiple Generic[...] and/or Protocol[...]. + gvars = None + for base in cls.__orig_bases__: + if (isinstance(base, typing._GenericAlias) and + base.__origin__ in (typing.Generic, Protocol)): + # for error messages + the_base = base.__origin__.__name__ + if gvars is not None: + raise TypeError( + "Cannot inherit from Generic[...]" + " and/or Protocol[...] 
multiple types.") + gvars = base.__parameters__ + if gvars is None: + gvars = tvars + else: + tvarset = set(tvars) + gvarset = set(gvars) + if not tvarset <= gvarset: + s_vars = ', '.join(str(t) for t in tvars if t not in gvarset) + s_args = ', '.join(str(g) for g in gvars) + raise TypeError(f"Some type variables ({s_vars}) are" + f" not listed in {the_base}[{s_args}]") + tvars = gvars + cls.__parameters__ = tuple(tvars) + + +# 3.8+ +if hasattr(typing, 'Protocol'): + Protocol = typing.Protocol +# 3.7 +else: + + def _no_init(self, *args, **kwargs): + if type(self)._is_protocol: + raise TypeError('Protocols cannot be instantiated') + + class _ProtocolMeta(abc.ABCMeta): # noqa: B024 + # This metaclass is a bit unfortunate and exists only because of the lack + # of __instancehook__. + def __instancecheck__(cls, instance): + # We need this method for situations where attributes are + # assigned in __init__. + if ((not getattr(cls, '_is_protocol', False) or + _is_callable_members_only(cls)) and + issubclass(instance.__class__, cls)): + return True + if cls._is_protocol: + if all(hasattr(instance, attr) and + (not callable(getattr(cls, attr, None)) or + getattr(instance, attr) is not None) + for attr in _get_protocol_attrs(cls)): + return True + return super().__instancecheck__(instance) + + class Protocol(metaclass=_ProtocolMeta): + # There is quite a lot of overlapping code with typing.Generic. + # Unfortunately it is hard to avoid this while these live in two different + # modules. The duplicated code will be removed when Protocol is moved to typing. + """Base class for protocol classes. Protocol classes are defined as:: + + class Proto(Protocol): + def meth(self) -> int: + ... + + Such classes are primarily used with static type checkers that recognize + structural subtyping (static duck-typing), for example:: + + class C: + def meth(self) -> int: + return 0 + + def func(x: Proto) -> int: + return x.meth() + + func(C()) # Passes static type check + + See PEP 544 for details. Protocol classes decorated with + @typing_extensions.runtime act as simple-minded runtime protocol that checks + only the presence of given attributes, ignoring their type signatures. + + Protocol classes can be generic, they are defined as:: + + class GenProto(Protocol[T]): + def meth(self) -> T: + ... + """ + __slots__ = () + _is_protocol = True + + def __new__(cls, *args, **kwds): + if cls is Protocol: + raise TypeError("Type Protocol cannot be instantiated; " + "it can only be used as a base class") + return super().__new__(cls) + + @typing._tp_cache + def __class_getitem__(cls, params): + if not isinstance(params, tuple): + params = (params,) + if not params and cls is not typing.Tuple: + raise TypeError( + f"Parameter list to {cls.__qualname__}[...] cannot be empty") + msg = "Parameters to generic types must be types." + params = tuple(typing._type_check(p, msg) for p in params) # noqa + if cls is Protocol: + # Generic can only be subscripted with unique type variables. + if not all(isinstance(p, typing.TypeVar) for p in params): + i = 0 + while isinstance(params[i], typing.TypeVar): + i += 1 + raise TypeError( + "Parameters to Protocol[...] must all be type variables." + f" Parameter {i + 1} is {params[i]}") + if len(set(params)) != len(params): + raise TypeError( + "Parameters to Protocol[...] must all be unique") + else: + # Subscripting a regular Generic subclass. 
+ _check_generic(cls, params, len(cls.__parameters__)) + return typing._GenericAlias(cls, params) + + def __init_subclass__(cls, *args, **kwargs): + if '__orig_bases__' in cls.__dict__: + error = typing.Generic in cls.__orig_bases__ + else: + error = typing.Generic in cls.__bases__ + if error: + raise TypeError("Cannot inherit from plain Generic") + _maybe_adjust_parameters(cls) + + # Determine if this is a protocol or a concrete subclass. + if not cls.__dict__.get('_is_protocol', None): + cls._is_protocol = any(b is Protocol for b in cls.__bases__) + + # Set (or override) the protocol subclass hook. + def _proto_hook(other): + if not cls.__dict__.get('_is_protocol', None): + return NotImplemented + if not getattr(cls, '_is_runtime_protocol', False): + if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']: + return NotImplemented + raise TypeError("Instance and class checks can only be used with" + " @runtime protocols") + if not _is_callable_members_only(cls): + if sys._getframe(2).f_globals['__name__'] in ['abc', 'functools']: + return NotImplemented + raise TypeError("Protocols with non-method members" + " don't support issubclass()") + if not isinstance(other, type): + # Same error as for issubclass(1, int) + raise TypeError('issubclass() arg 1 must be a class') + for attr in _get_protocol_attrs(cls): + for base in other.__mro__: + if attr in base.__dict__: + if base.__dict__[attr] is None: + return NotImplemented + break + annotations = getattr(base, '__annotations__', {}) + if (isinstance(annotations, typing.Mapping) and + attr in annotations and + isinstance(other, _ProtocolMeta) and + other._is_protocol): + break + else: + return NotImplemented + return True + if '__subclasshook__' not in cls.__dict__: + cls.__subclasshook__ = _proto_hook + + # We have nothing more to do for non-protocols. + if not cls._is_protocol: + return + + # Check consistency of bases. + for base in cls.__bases__: + if not (base in (object, typing.Generic) or + base.__module__ == 'collections.abc' and + base.__name__ in _PROTO_WHITELIST or + isinstance(base, _ProtocolMeta) and base._is_protocol): + raise TypeError('Protocols can only inherit from other' + f' protocols, got {repr(base)}') + cls.__init__ = _no_init + + +# 3.8+ +if hasattr(typing, 'runtime_checkable'): + runtime_checkable = typing.runtime_checkable +# 3.7 +else: + def runtime_checkable(cls): + """Mark a protocol class as a runtime protocol, so that it + can be used with isinstance() and issubclass(). Raise TypeError + if applied to a non-protocol class. + + This allows a simple-minded structural check very similar to the + one-offs in collections.abc such as Hashable. + """ + if not isinstance(cls, _ProtocolMeta) or not cls._is_protocol: + raise TypeError('@runtime_checkable can be only applied to protocol classes,' + f' got {cls!r}') + cls._is_runtime_protocol = True + return cls + + +# Exists for backwards compatibility. +runtime = runtime_checkable + + +# 3.8+ +if hasattr(typing, 'SupportsIndex'): + SupportsIndex = typing.SupportsIndex +# 3.7 +else: + @runtime_checkable + class SupportsIndex(Protocol): + __slots__ = () + + @abc.abstractmethod + def __index__(self) -> int: + pass + + +if hasattr(typing, "Required"): + # The standard library TypedDict in Python 3.8 does not store runtime information + # about which (if any) keys are optional. See https://bugs.python.org/issue38834 + # The standard library TypedDict in Python 3.9.0/1 does not honour the "total" + # keyword with old-style TypedDict(). 
See https://bugs.python.org/issue42059 + # The standard library TypedDict below Python 3.11 does not store runtime + # information about optional and required keys when using Required or NotRequired. + # Generic TypedDicts are also impossible using typing.TypedDict on Python <3.11. + TypedDict = typing.TypedDict + _TypedDictMeta = typing._TypedDictMeta + is_typeddict = typing.is_typeddict +else: + def _check_fails(cls, other): + try: + if sys._getframe(1).f_globals['__name__'] not in ['abc', + 'functools', + 'typing']: + # Typed dicts are only for static structural subtyping. + raise TypeError('TypedDict does not support instance and class checks') + except (AttributeError, ValueError): + pass + return False + + def _dict_new(*args, **kwargs): + if not args: + raise TypeError('TypedDict.__new__(): not enough arguments') + _, args = args[0], args[1:] # allow the "cls" keyword be passed + return dict(*args, **kwargs) + + _dict_new.__text_signature__ = '($cls, _typename, _fields=None, /, **kwargs)' + + def _typeddict_new(*args, total=True, **kwargs): + if not args: + raise TypeError('TypedDict.__new__(): not enough arguments') + _, args = args[0], args[1:] # allow the "cls" keyword be passed + if args: + typename, args = args[0], args[1:] # allow the "_typename" keyword be passed + elif '_typename' in kwargs: + typename = kwargs.pop('_typename') + import warnings + warnings.warn("Passing '_typename' as keyword argument is deprecated", + DeprecationWarning, stacklevel=2) + else: + raise TypeError("TypedDict.__new__() missing 1 required positional " + "argument: '_typename'") + if args: + try: + fields, = args # allow the "_fields" keyword be passed + except ValueError: + raise TypeError('TypedDict.__new__() takes from 2 to 3 ' + f'positional arguments but {len(args) + 2} ' + 'were given') + elif '_fields' in kwargs and len(kwargs) == 1: + fields = kwargs.pop('_fields') + import warnings + warnings.warn("Passing '_fields' as keyword argument is deprecated", + DeprecationWarning, stacklevel=2) + else: + fields = None + + if fields is None: + fields = kwargs + elif kwargs: + raise TypeError("TypedDict takes either a dict or keyword arguments," + " but not both") + + ns = {'__annotations__': dict(fields)} + try: + # Setting correct module is necessary to make typed dict classes pickleable. + ns['__module__'] = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + pass + + return _TypedDictMeta(typename, (), ns, total=total) + + _typeddict_new.__text_signature__ = ('($cls, _typename, _fields=None,' + ' /, *, total=True, **kwargs)') + + class _TypedDictMeta(type): + def __init__(cls, name, bases, ns, total=True): + super().__init__(name, bases, ns) + + def __new__(cls, name, bases, ns, total=True): + # Create new typed dict class object. + # This method is called directly when TypedDict is subclassed, + # or via _typeddict_new when TypedDict is instantiated. This way + # TypedDict supports all three syntaxes described in its docstring. + # Subclasses and instances of TypedDict return actual dictionaries + # via _dict_new. + ns['__new__'] = _typeddict_new if name == 'TypedDict' else _dict_new + # Don't insert typing.Generic into __bases__ here, + # or Generic.__init_subclass__ will raise TypeError + # in the super().__new__() call. + # Instead, monkey-patch __bases__ onto the class after it's been created. 
+ tp_dict = super().__new__(cls, name, (dict,), ns) + + if any(issubclass(base, typing.Generic) for base in bases): + tp_dict.__bases__ = (typing.Generic, dict) + _maybe_adjust_parameters(tp_dict) + + annotations = {} + own_annotations = ns.get('__annotations__', {}) + msg = "TypedDict('Name', {f0: t0, f1: t1, ...}); each t must be a type" + own_annotations = { + n: typing._type_check(tp, msg) for n, tp in own_annotations.items() + } + required_keys = set() + optional_keys = set() + + for base in bases: + annotations.update(base.__dict__.get('__annotations__', {})) + required_keys.update(base.__dict__.get('__required_keys__', ())) + optional_keys.update(base.__dict__.get('__optional_keys__', ())) + + annotations.update(own_annotations) + for annotation_key, annotation_type in own_annotations.items(): + annotation_origin = get_origin(annotation_type) + if annotation_origin is Annotated: + annotation_args = get_args(annotation_type) + if annotation_args: + annotation_type = annotation_args[0] + annotation_origin = get_origin(annotation_type) + + if annotation_origin is Required: + required_keys.add(annotation_key) + elif annotation_origin is NotRequired: + optional_keys.add(annotation_key) + elif total: + required_keys.add(annotation_key) + else: + optional_keys.add(annotation_key) + + tp_dict.__annotations__ = annotations + tp_dict.__required_keys__ = frozenset(required_keys) + tp_dict.__optional_keys__ = frozenset(optional_keys) + if not hasattr(tp_dict, '__total__'): + tp_dict.__total__ = total + return tp_dict + + __instancecheck__ = __subclasscheck__ = _check_fails + + TypedDict = _TypedDictMeta('TypedDict', (dict,), {}) + TypedDict.__module__ = __name__ + TypedDict.__doc__ = \ + """A simple typed name space. At runtime it is equivalent to a plain dict. + + TypedDict creates a dictionary type that expects all of its + instances to have a certain set of keys, with each key + associated with a value of a consistent type. This expectation + is not checked at runtime but is only enforced by type checkers. + Usage:: + + class Point2D(TypedDict): + x: int + y: int + label: str + + a: Point2D = {'x': 1, 'y': 2, 'label': 'good'} # OK + b: Point2D = {'z': 3, 'label': 'bad'} # Fails type check + + assert Point2D(x=1, y=2, label='first') == dict(x=1, y=2, label='first') + + The type info can be accessed via the Point2D.__annotations__ dict, and + the Point2D.__required_keys__ and Point2D.__optional_keys__ frozensets. + TypedDict supports two additional equivalent forms:: + + Point2D = TypedDict('Point2D', x=int, y=int, label=str) + Point2D = TypedDict('Point2D', {'x': int, 'y': int, 'label': str}) + + The class syntax is only supported in Python 3.6+, while two other + syntax forms work for Python 2.7 and 3.2+ + """ + + if hasattr(typing, "_TypedDictMeta"): + _TYPEDDICT_TYPES = (typing._TypedDictMeta, _TypedDictMeta) + else: + _TYPEDDICT_TYPES = (_TypedDictMeta,) + + def is_typeddict(tp): + """Check if an annotation is a TypedDict class + + For example:: + class Film(TypedDict): + title: str + year: int + + is_typeddict(Film) # => True + is_typeddict(Union[list, str]) # => False + """ + return isinstance(tp, tuple(_TYPEDDICT_TYPES)) + + +if hasattr(typing, "assert_type"): + assert_type = typing.assert_type + +else: + def assert_type(__val, __typ): + """Assert (to the type checker) that the value is of the given type. 
+ + When the type checker encounters a call to assert_type(), it + emits an error if the value is not of the specified type:: + + def greet(name: str) -> None: + assert_type(name, str) # ok + assert_type(name, int) # type checker error + + At runtime this returns the first argument unchanged and otherwise + does nothing. + """ + return __val + + +if hasattr(typing, "Required"): + get_type_hints = typing.get_type_hints +else: + import functools + import types + + # replaces _strip_annotations() + def _strip_extras(t): + """Strips Annotated, Required and NotRequired from a given type.""" + if isinstance(t, _AnnotatedAlias): + return _strip_extras(t.__origin__) + if hasattr(t, "__origin__") and t.__origin__ in (Required, NotRequired): + return _strip_extras(t.__args__[0]) + if isinstance(t, typing._GenericAlias): + stripped_args = tuple(_strip_extras(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return t.copy_with(stripped_args) + if hasattr(types, "GenericAlias") and isinstance(t, types.GenericAlias): + stripped_args = tuple(_strip_extras(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return types.GenericAlias(t.__origin__, stripped_args) + if hasattr(types, "UnionType") and isinstance(t, types.UnionType): + stripped_args = tuple(_strip_extras(a) for a in t.__args__) + if stripped_args == t.__args__: + return t + return functools.reduce(operator.or_, stripped_args) + + return t + + def get_type_hints(obj, globalns=None, localns=None, include_extras=False): + """Return type hints for an object. + + This is often the same as obj.__annotations__, but it handles + forward references encoded as string literals, adds Optional[t] if a + default value equal to None is set and recursively replaces all + 'Annotated[T, ...]', 'Required[T]' or 'NotRequired[T]' with 'T' + (unless 'include_extras=True'). + + The argument may be a module, class, method, or function. The annotations + are returned as a dictionary. For classes, annotations include also + inherited members. + + TypeError is raised if the argument is not of a type that can contain + annotations, and an empty dictionary is returned if no annotations are + present. + + BEWARE -- the behavior of globalns and localns is counterintuitive + (unless you are familiar with how eval() and exec() work). The + search order is locals first, then globals. + + - If no dict arguments are passed, an attempt is made to use the + globals from obj (or the respective module's globals for classes), + and these are also used as the locals. If the object does not appear + to have globals, an empty dictionary is used. + + - If one dict argument is passed, it is used for both globals and + locals. + + - If two dict arguments are passed, they specify globals and + locals, respectively. + """ + if hasattr(typing, "Annotated"): + hint = typing.get_type_hints( + obj, globalns=globalns, localns=localns, include_extras=True + ) + else: + hint = typing.get_type_hints(obj, globalns=globalns, localns=localns) + if include_extras: + return hint + return {k: _strip_extras(t) for k, t in hint.items()} + + +# Python 3.9+ has PEP 593 (Annotated) +if hasattr(typing, 'Annotated'): + Annotated = typing.Annotated + # Not exported and not a public API, but needed for get_origin() and get_args() + # to work. + _AnnotatedAlias = typing._AnnotatedAlias +# 3.7-3.8 +else: + class _AnnotatedAlias(typing._GenericAlias, _root=True): + """Runtime representation of an annotated type. 
+ + At its core 'Annotated[t, dec1, dec2, ...]' is an alias for the type 't' + with extra annotations. The alias behaves like a normal typing alias, + instantiating is the same as instantiating the underlying type, binding + it to types is also the same. + """ + def __init__(self, origin, metadata): + if isinstance(origin, _AnnotatedAlias): + metadata = origin.__metadata__ + metadata + origin = origin.__origin__ + super().__init__(origin, origin) + self.__metadata__ = metadata + + def copy_with(self, params): + assert len(params) == 1 + new_type = params[0] + return _AnnotatedAlias(new_type, self.__metadata__) + + def __repr__(self): + return (f"typing_extensions.Annotated[{typing._type_repr(self.__origin__)}, " + f"{', '.join(repr(a) for a in self.__metadata__)}]") + + def __reduce__(self): + return operator.getitem, ( + Annotated, (self.__origin__,) + self.__metadata__ + ) + + def __eq__(self, other): + if not isinstance(other, _AnnotatedAlias): + return NotImplemented + if self.__origin__ != other.__origin__: + return False + return self.__metadata__ == other.__metadata__ + + def __hash__(self): + return hash((self.__origin__, self.__metadata__)) + + class Annotated: + """Add context specific metadata to a type. + + Example: Annotated[int, runtime_check.Unsigned] indicates to the + hypothetical runtime_check module that this type is an unsigned int. + Every other consumer of this type can ignore this metadata and treat + this type as int. + + The first argument to Annotated must be a valid type (and will be in + the __origin__ field), the remaining arguments are kept as a tuple in + the __extra__ field. + + Details: + + - It's an error to call `Annotated` with less than two arguments. + - Nested Annotated are flattened:: + + Annotated[Annotated[T, Ann1, Ann2], Ann3] == Annotated[T, Ann1, Ann2, Ann3] + + - Instantiating an annotated type is equivalent to instantiating the + underlying type:: + + Annotated[C, Ann1](5) == C(5) + + - Annotated can be used as a generic type alias:: + + Optimized = Annotated[T, runtime.Optimize()] + Optimized[int] == Annotated[int, runtime.Optimize()] + + OptimizedList = Annotated[List[T], runtime.Optimize()] + OptimizedList[int] == Annotated[List[int], runtime.Optimize()] + """ + + __slots__ = () + + def __new__(cls, *args, **kwargs): + raise TypeError("Type Annotated cannot be instantiated.") + + @typing._tp_cache + def __class_getitem__(cls, params): + if not isinstance(params, tuple) or len(params) < 2: + raise TypeError("Annotated[...] should be used " + "with at least two arguments (a type and an " + "annotation).") + allowed_special_forms = (ClassVar, Final) + if get_origin(params[0]) in allowed_special_forms: + origin = params[0] + else: + msg = "Annotated[t, ...]: t must be a type." + origin = typing._type_check(params[0], msg) + metadata = tuple(params[1:]) + return _AnnotatedAlias(origin, metadata) + + def __init_subclass__(cls, *args, **kwargs): + raise TypeError( + f"Cannot subclass {cls.__module__}.Annotated" + ) + +# Python 3.8 has get_origin() and get_args() but those implementations aren't +# Annotated-aware, so we can't use those. Python 3.9's versions don't support +# ParamSpecArgs and ParamSpecKwargs, so only Python 3.10's versions will do. 
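# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the vendored upstream file): how the
# Annotated backport above and the Annotated-aware get_origin()/get_args()
# helpers defined just below are typically used together. ``MaxLen`` is a
# hypothetical marker class introduced only for this example.
from typing_extensions import Annotated, get_args, get_origin, get_type_hints

class MaxLen:
    def __init__(self, n: int) -> None:
        self.n = n

UserName = Annotated[str, MaxLen(32)]

assert get_origin(UserName) is Annotated
underlying, *metadata = get_args(UserName)
assert underlying is str and isinstance(metadata[0], MaxLen)

def greet(name: UserName) -> str:
    return f"hello {name}"

# By default get_type_hints() strips the metadata; include_extras=True keeps it.
assert get_type_hints(greet) == {"name": str, "return": str}
assert get_origin(get_type_hints(greet, include_extras=True)["name"]) is Annotated
# ---------------------------------------------------------------------------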
+if sys.version_info[:2] >= (3, 10): + get_origin = typing.get_origin + get_args = typing.get_args +# 3.7-3.9 +else: + try: + # 3.9+ + from typing import _BaseGenericAlias + except ImportError: + _BaseGenericAlias = typing._GenericAlias + try: + # 3.9+ + from typing import GenericAlias as _typing_GenericAlias + except ImportError: + _typing_GenericAlias = typing._GenericAlias + + def get_origin(tp): + """Get the unsubscripted version of a type. + + This supports generic types, Callable, Tuple, Union, Literal, Final, ClassVar + and Annotated. Return None for unsupported types. Examples:: + + get_origin(Literal[42]) is Literal + get_origin(int) is None + get_origin(ClassVar[int]) is ClassVar + get_origin(Generic) is Generic + get_origin(Generic[T]) is Generic + get_origin(Union[T, int]) is Union + get_origin(List[Tuple[T, T]][int]) == list + get_origin(P.args) is P + """ + if isinstance(tp, _AnnotatedAlias): + return Annotated + if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias, _BaseGenericAlias, + ParamSpecArgs, ParamSpecKwargs)): + return tp.__origin__ + if tp is typing.Generic: + return typing.Generic + return None + + def get_args(tp): + """Get type arguments with all substitutions performed. + + For unions, basic simplifications used by Union constructor are performed. + Examples:: + get_args(Dict[str, int]) == (str, int) + get_args(int) == () + get_args(Union[int, Union[T, int], str][int]) == (int, str) + get_args(Union[int, Tuple[T, int]][str]) == (int, Tuple[str, int]) + get_args(Callable[[], T][int]) == ([], int) + """ + if isinstance(tp, _AnnotatedAlias): + return (tp.__origin__,) + tp.__metadata__ + if isinstance(tp, (typing._GenericAlias, _typing_GenericAlias)): + if getattr(tp, "_special", False): + return () + res = tp.__args__ + if get_origin(tp) is collections.abc.Callable and res[0] is not Ellipsis: + res = (list(res[:-1]), res[-1]) + return res + return () + + +# 3.10+ +if hasattr(typing, 'TypeAlias'): + TypeAlias = typing.TypeAlias +# 3.9 +elif sys.version_info[:2] >= (3, 9): + class _TypeAliasForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + @_TypeAliasForm + def TypeAlias(self, parameters): + """Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. + + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example above. + """ + raise TypeError(f"{self} is not subscriptable") +# 3.7-3.8 +else: + class _TypeAliasForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + TypeAlias = _TypeAliasForm('TypeAlias', + doc="""Special marker indicating that an assignment should + be recognized as a proper type alias definition by type + checkers. 
+ + For example:: + + Predicate: TypeAlias = Callable[..., bool] + + It's invalid when used anywhere except as in the example + above.""") + + +class _DefaultMixin: + """Mixin for TypeVarLike defaults.""" + + __slots__ = () + + def __init__(self, default): + if isinstance(default, (tuple, list)): + self.__default__ = tuple((typing._type_check(d, "Default must be a type") + for d in default)) + elif default: + self.__default__ = typing._type_check(default, "Default must be a type") + else: + self.__default__ = None + + +# Add default and infer_variance parameters from PEP 696 and 695 +class TypeVar(typing.TypeVar, _DefaultMixin, _root=True): + """Type variable.""" + + __module__ = 'typing' + + def __init__(self, name, *constraints, bound=None, + covariant=False, contravariant=False, + default=None, infer_variance=False): + super().__init__(name, *constraints, bound=bound, covariant=covariant, + contravariant=contravariant) + _DefaultMixin.__init__(self, default) + self.__infer_variance__ = infer_variance + + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod + + +# Python 3.10+ has PEP 612 +if hasattr(typing, 'ParamSpecArgs'): + ParamSpecArgs = typing.ParamSpecArgs + ParamSpecKwargs = typing.ParamSpecKwargs +# 3.7-3.9 +else: + class _Immutable: + """Mixin to indicate that object should not be copied.""" + __slots__ = () + + def __copy__(self): + return self + + def __deepcopy__(self, memo): + return self + + class ParamSpecArgs(_Immutable): + """The args for a ParamSpec object. + + Given a ParamSpec object P, P.args is an instance of ParamSpecArgs. + + ParamSpecArgs objects have a reference back to their ParamSpec: + + P.args.__origin__ is P + + This type is meant for runtime introspection and has no special meaning to + static type checkers. + """ + def __init__(self, origin): + self.__origin__ = origin + + def __repr__(self): + return f"{self.__origin__.__name__}.args" + + def __eq__(self, other): + if not isinstance(other, ParamSpecArgs): + return NotImplemented + return self.__origin__ == other.__origin__ + + class ParamSpecKwargs(_Immutable): + """The kwargs for a ParamSpec object. + + Given a ParamSpec object P, P.kwargs is an instance of ParamSpecKwargs. + + ParamSpecKwargs objects have a reference back to their ParamSpec: + + P.kwargs.__origin__ is P + + This type is meant for runtime introspection and has no special meaning to + static type checkers. 
+ """ + def __init__(self, origin): + self.__origin__ = origin + + def __repr__(self): + return f"{self.__origin__.__name__}.kwargs" + + def __eq__(self, other): + if not isinstance(other, ParamSpecKwargs): + return NotImplemented + return self.__origin__ == other.__origin__ + +# 3.10+ +if hasattr(typing, 'ParamSpec'): + + # Add default Parameter - PEP 696 + class ParamSpec(typing.ParamSpec, _DefaultMixin, _root=True): + """Parameter specification variable.""" + + __module__ = 'typing' + + def __init__(self, name, *, bound=None, covariant=False, contravariant=False, + default=None): + super().__init__(name, bound=bound, covariant=covariant, + contravariant=contravariant) + _DefaultMixin.__init__(self, default) + + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod + +# 3.7-3.9 +else: + + # Inherits from list as a workaround for Callable checks in Python < 3.9.2. + class ParamSpec(list, _DefaultMixin): + """Parameter specification variable. + + Usage:: + + P = ParamSpec('P') + + Parameter specification variables exist primarily for the benefit of static + type checkers. They are used to forward the parameter types of one + callable to another callable, a pattern commonly found in higher order + functions and decorators. They are only valid when used in ``Concatenate``, + or s the first argument to ``Callable``. In Python 3.10 and higher, + they are also supported in user-defined Generics at runtime. + See class Generic for more information on generic types. An + example for annotating a decorator:: + + T = TypeVar('T') + P = ParamSpec('P') + + def add_logging(f: Callable[P, T]) -> Callable[P, T]: + '''A type-safe decorator to add logging to a function.''' + def inner(*args: P.args, **kwargs: P.kwargs) -> T: + logging.info(f'{f.__name__} was called') + return f(*args, **kwargs) + return inner + + @add_logging + def add_two(x: float, y: float) -> float: + '''Add two numbers together.''' + return x + y + + Parameter specification variables defined with covariant=True or + contravariant=True can be used to declare covariant or contravariant + generic types. These keyword arguments are valid, but their actual semantics + are yet to be decided. See PEP 612 for details. + + Parameter specification variables can be introspected. e.g.: + + P.__name__ == 'T' + P.__bound__ == None + P.__covariant__ == False + P.__contravariant__ == False + + Note that only parameter specification variables defined in global scope can + be pickled. + """ + + # Trick Generic __parameters__. 
+ __class__ = typing.TypeVar + + @property + def args(self): + return ParamSpecArgs(self) + + @property + def kwargs(self): + return ParamSpecKwargs(self) + + def __init__(self, name, *, bound=None, covariant=False, contravariant=False, + default=None): + super().__init__([self]) + self.__name__ = name + self.__covariant__ = bool(covariant) + self.__contravariant__ = bool(contravariant) + if bound: + self.__bound__ = typing._type_check(bound, 'Bound must be a type.') + else: + self.__bound__ = None + _DefaultMixin.__init__(self, default) + + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod + + def __repr__(self): + if self.__covariant__: + prefix = '+' + elif self.__contravariant__: + prefix = '-' + else: + prefix = '~' + return prefix + self.__name__ + + def __hash__(self): + return object.__hash__(self) + + def __eq__(self, other): + return self is other + + def __reduce__(self): + return self.__name__ + + # Hack to get typing._type_check to pass. + def __call__(self, *args, **kwargs): + pass + + +# 3.7-3.9 +if not hasattr(typing, 'Concatenate'): + # Inherits from list as a workaround for Callable checks in Python < 3.9.2. + class _ConcatenateGenericAlias(list): + + # Trick Generic into looking into this for __parameters__. + __class__ = typing._GenericAlias + + # Flag in 3.8. + _special = False + + def __init__(self, origin, args): + super().__init__(args) + self.__origin__ = origin + self.__args__ = args + + def __repr__(self): + _type_repr = typing._type_repr + return (f'{_type_repr(self.__origin__)}' + f'[{", ".join(_type_repr(arg) for arg in self.__args__)}]') + + def __hash__(self): + return hash((self.__origin__, self.__args__)) + + # Hack to get typing._type_check to pass in Generic. + def __call__(self, *args, **kwargs): + pass + + @property + def __parameters__(self): + return tuple( + tp for tp in self.__args__ if isinstance(tp, (typing.TypeVar, ParamSpec)) + ) + + +# 3.7-3.9 +@typing._tp_cache +def _concatenate_getitem(self, parameters): + if parameters == (): + raise TypeError("Cannot take a Concatenate of no types.") + if not isinstance(parameters, tuple): + parameters = (parameters,) + if not isinstance(parameters[-1], ParamSpec): + raise TypeError("The last parameter to Concatenate should be a " + "ParamSpec variable.") + msg = "Concatenate[arg, ...]: each arg must be a type." + parameters = tuple(typing._type_check(p, msg) for p in parameters) + return _ConcatenateGenericAlias(self, parameters) + + +# 3.10+ +if hasattr(typing, 'Concatenate'): + Concatenate = typing.Concatenate + _ConcatenateGenericAlias = typing._ConcatenateGenericAlias # noqa +# 3.9 +elif sys.version_info[:2] >= (3, 9): + @_TypeAliasForm + def Concatenate(self, parameters): + """Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a + higher order function which adds, removes or transforms parameters of a + callable. + + For example:: + + Callable[Concatenate[int, P], int] + + See PEP 612 for detailed information. + """ + return _concatenate_getitem(self, parameters) +# 3.7-8 +else: + class _ConcatenateForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' 
+ self._name + + def __getitem__(self, parameters): + return _concatenate_getitem(self, parameters) + + Concatenate = _ConcatenateForm( + 'Concatenate', + doc="""Used in conjunction with ``ParamSpec`` and ``Callable`` to represent a + higher order function which adds, removes or transforms parameters of a + callable. + + For example:: + + Callable[Concatenate[int, P], int] + + See PEP 612 for detailed information. + """) + +# 3.10+ +if hasattr(typing, 'TypeGuard'): + TypeGuard = typing.TypeGuard +# 3.9 +elif sys.version_info[:2] >= (3, 9): + class _TypeGuardForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + @_TypeGuardForm + def TypeGuard(self, parameters): + """Special typing form used to annotate the return type of a user-defined + type guard function. ``TypeGuard`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeGuard[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeGuard`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the type inside ``TypeGuard``. + + For example:: + + def is_str(val: Union[str, float]): + # "isinstance" type guard + if isinstance(val, str): + # Type of ``val`` is narrowed to ``str`` + ... + else: + # Else, type of ``val`` is narrowed to ``float``. + ... + + Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower + form of ``TypeA`` (it can even be a wider form) and this may lead to + type-unsafe results. The main reason is to allow for things like + narrowing ``List[object]`` to ``List[str]`` even though the latter is not + a subtype of the former, since ``List`` is invariant. The responsibility of + writing type-safe type guards is left to the user. + + ``TypeGuard`` also works with type variables. For more information, see + PEP 647 (User-Defined Type Guards). + """ + item = typing._type_check(parameters, f'{self} accepts only a single type.') + return typing._GenericAlias(self, (item,)) +# 3.7-3.8 +else: + class _TypeGuardForm(typing._SpecialForm, _root=True): + + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only a single type') + return typing._GenericAlias(self, (item,)) + + TypeGuard = _TypeGuardForm( + 'TypeGuard', + doc="""Special typing form used to annotate the return type of a user-defined + type guard function. ``TypeGuard`` only accepts a single type argument. + At runtime, functions marked this way should return a boolean. + + ``TypeGuard`` aims to benefit *type narrowing* -- a technique used by static + type checkers to determine a more precise type of an expression within a + program's code flow. Usually type narrowing is done by analyzing + conditional code flow and applying the narrowing to a block of code. 
The + conditional expression here is sometimes referred to as a "type guard". + + Sometimes it would be convenient to use a user-defined boolean function + as a type guard. Such a function should use ``TypeGuard[...]`` as its + return type to alert static type checkers to this intention. + + Using ``-> TypeGuard`` tells the static type checker that for a given + function: + + 1. The return value is a boolean. + 2. If the return value is ``True``, the type of its argument + is the type inside ``TypeGuard``. + + For example:: + + def is_str(val: Union[str, float]): + # "isinstance" type guard + if isinstance(val, str): + # Type of ``val`` is narrowed to ``str`` + ... + else: + # Else, type of ``val`` is narrowed to ``float``. + ... + + Strict type narrowing is not enforced -- ``TypeB`` need not be a narrower + form of ``TypeA`` (it can even be a wider form) and this may lead to + type-unsafe results. The main reason is to allow for things like + narrowing ``List[object]`` to ``List[str]`` even though the latter is not + a subtype of the former, since ``List`` is invariant. The responsibility of + writing type-safe type guards is left to the user. + + ``TypeGuard`` also works with type variables. For more information, see + PEP 647 (User-Defined Type Guards). + """) + + +# Vendored from cpython typing._SpecialFrom +class _SpecialForm(typing._Final, _root=True): + __slots__ = ('_name', '__doc__', '_getitem') + + def __init__(self, getitem): + self._getitem = getitem + self._name = getitem.__name__ + self.__doc__ = getitem.__doc__ + + def __getattr__(self, item): + if item in {'__name__', '__qualname__'}: + return self._name + + raise AttributeError(item) + + def __mro_entries__(self, bases): + raise TypeError(f"Cannot subclass {self!r}") + + def __repr__(self): + return f'typing_extensions.{self._name}' + + def __reduce__(self): + return self._name + + def __call__(self, *args, **kwds): + raise TypeError(f"Cannot instantiate {self!r}") + + def __or__(self, other): + return typing.Union[self, other] + + def __ror__(self, other): + return typing.Union[other, self] + + def __instancecheck__(self, obj): + raise TypeError(f"{self} cannot be used with isinstance()") + + def __subclasscheck__(self, cls): + raise TypeError(f"{self} cannot be used with issubclass()") + + @typing._tp_cache + def __getitem__(self, parameters): + return self._getitem(self, parameters) + + +if hasattr(typing, "LiteralString"): + LiteralString = typing.LiteralString +else: + @_SpecialForm + def LiteralString(self, params): + """Represents an arbitrary literal string. + + Example:: + + from typing_extensions import LiteralString + + def query(sql: LiteralString) -> ...: + ... + + query("SELECT * FROM table") # ok + query(f"SELECT * FROM {input()}") # not ok + + See PEP 675 for details. + + """ + raise TypeError(f"{self} is not subscriptable") + + +if hasattr(typing, "Self"): + Self = typing.Self +else: + @_SpecialForm + def Self(self, params): + """Used to spell the type of "self" in classes. + + Example:: + + from typing import Self + + class ReturnsSelf: + def parse(self, data: bytes) -> Self: + ... + return self + + """ + + raise TypeError(f"{self} is not subscriptable") + + +if hasattr(typing, "Never"): + Never = typing.Never +else: + @_SpecialForm + def Never(self, params): + """The bottom type, a type that has no members. 
+ + This can be used to define a function that should never be + called, or a function that never returns:: + + from typing_extensions import Never + + def never_call_me(arg: Never) -> None: + pass + + def int_or_str(arg: int | str) -> None: + never_call_me(arg) # type checker error + match arg: + case int(): + print("It's an int") + case str(): + print("It's a str") + case _: + never_call_me(arg) # ok, arg is of type Never + + """ + + raise TypeError(f"{self} is not subscriptable") + + +if hasattr(typing, 'Required'): + Required = typing.Required + NotRequired = typing.NotRequired +elif sys.version_info[:2] >= (3, 9): + class _ExtensionsSpecialForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + @_ExtensionsSpecialForm + def Required(self, parameters): + """A special typing construct to mark a key of a total=False TypedDict + as required. For example: + + class Movie(TypedDict, total=False): + title: Required[str] + year: int + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + + There is no runtime checking that a required key is actually provided + when instantiating a related TypedDict. + """ + item = typing._type_check(parameters, f'{self._name} accepts only a single type.') + return typing._GenericAlias(self, (item,)) + + @_ExtensionsSpecialForm + def NotRequired(self, parameters): + """A special typing construct to mark a key of a TypedDict as + potentially missing. For example: + + class Movie(TypedDict): + title: str + year: NotRequired[int] + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + """ + item = typing._type_check(parameters, f'{self._name} accepts only a single type.') + return typing._GenericAlias(self, (item,)) + +else: + class _RequiredForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only a single type.') + return typing._GenericAlias(self, (item,)) + + Required = _RequiredForm( + 'Required', + doc="""A special typing construct to mark a key of a total=False TypedDict + as required. For example: + + class Movie(TypedDict, total=False): + title: Required[str] + year: int + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + + There is no runtime checking that a required key is actually provided + when instantiating a related TypedDict. + """) + NotRequired = _RequiredForm( + 'NotRequired', + doc="""A special typing construct to mark a key of a TypedDict as + potentially missing. For example: + + class Movie(TypedDict): + title: str + year: NotRequired[int] + + m = Movie( + title='The Matrix', # typechecker error if key is omitted + year=1999, + ) + """) + + +if hasattr(typing, "Unpack"): # 3.11+ + Unpack = typing.Unpack +elif sys.version_info[:2] >= (3, 9): + class _UnpackSpecialForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + class _UnpackAlias(typing._GenericAlias, _root=True): + __class__ = typing.TypeVar + + @_UnpackSpecialForm + def Unpack(self, parameters): + """A special typing construct to unpack a variadic type. For example: + + Shape = TypeVarTuple('Shape') + Batch = NewType('Batch', int) + + def add_batch_axis( + x: Array[Unpack[Shape]] + ) -> Array[Batch, Unpack[Shape]]: ... 
+ + """ + item = typing._type_check(parameters, f'{self._name} accepts only a single type.') + return _UnpackAlias(self, (item,)) + + def _is_unpack(obj): + return isinstance(obj, _UnpackAlias) + +else: + class _UnpackAlias(typing._GenericAlias, _root=True): + __class__ = typing.TypeVar + + class _UnpackForm(typing._SpecialForm, _root=True): + def __repr__(self): + return 'typing_extensions.' + self._name + + def __getitem__(self, parameters): + item = typing._type_check(parameters, + f'{self._name} accepts only a single type.') + return _UnpackAlias(self, (item,)) + + Unpack = _UnpackForm( + 'Unpack', + doc="""A special typing construct to unpack a variadic type. For example: + + Shape = TypeVarTuple('Shape') + Batch = NewType('Batch', int) + + def add_batch_axis( + x: Array[Unpack[Shape]] + ) -> Array[Batch, Unpack[Shape]]: ... + + """) + + def _is_unpack(obj): + return isinstance(obj, _UnpackAlias) + + +if hasattr(typing, "TypeVarTuple"): # 3.11+ + + # Add default Parameter - PEP 696 + class TypeVarTuple(typing.TypeVarTuple, _DefaultMixin, _root=True): + """Type variable tuple.""" + + def __init__(self, name, *, default=None): + super().__init__(name) + _DefaultMixin.__init__(self, default) + + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod + +else: + class TypeVarTuple(_DefaultMixin): + """Type variable tuple. + + Usage:: + + Ts = TypeVarTuple('Ts') + + In the same way that a normal type variable is a stand-in for a single + type such as ``int``, a type variable *tuple* is a stand-in for a *tuple* + type such as ``Tuple[int, str]``. + + Type variable tuples can be used in ``Generic`` declarations. + Consider the following example:: + + class Array(Generic[*Ts]): ... + + The ``Ts`` type variable tuple here behaves like ``tuple[T1, T2]``, + where ``T1`` and ``T2`` are type variables. To use these type variables + as type parameters of ``Array``, we must *unpack* the type variable tuple using + the star operator: ``*Ts``. The signature of ``Array`` then behaves + as if we had simply written ``class Array(Generic[T1, T2]): ...``. + In contrast to ``Generic[T1, T2]``, however, ``Generic[*Shape]`` allows + us to parameterise the class with an *arbitrary* number of type parameters. + + Type variable tuples can be used anywhere a normal ``TypeVar`` can. + This includes class definitions, as shown above, as well as function + signatures and variable annotations:: + + class Array(Generic[*Ts]): + + def __init__(self, shape: Tuple[*Ts]): + self._shape: Tuple[*Ts] = shape + + def get_shape(self) -> Tuple[*Ts]: + return self._shape + + shape = (Height(480), Width(640)) + x: Array[Height, Width] = Array(shape) + y = abs(x) # Inferred type is Array[Height, Width] + z = x + x # ... is Array[Height, Width] + x.get_shape() # ... is tuple[Height, Width] + + """ + + # Trick Generic __parameters__. 
+ __class__ = typing.TypeVar + + def __iter__(self): + yield self.__unpacked__ + + def __init__(self, name, *, default=None): + self.__name__ = name + _DefaultMixin.__init__(self, default) + + # for pickling: + try: + def_mod = sys._getframe(1).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): + def_mod = None + if def_mod != 'typing_extensions': + self.__module__ = def_mod + + self.__unpacked__ = Unpack[self] + + def __repr__(self): + return self.__name__ + + def __hash__(self): + return object.__hash__(self) + + def __eq__(self, other): + return self is other + + def __reduce__(self): + return self.__name__ + + def __init_subclass__(self, *args, **kwds): + if '_root' not in kwds: + raise TypeError("Cannot subclass special typing classes") + + +if hasattr(typing, "reveal_type"): + reveal_type = typing.reveal_type +else: + def reveal_type(__obj: T) -> T: + """Reveal the inferred type of a variable. + + When a static type checker encounters a call to ``reveal_type()``, + it will emit the inferred type of the argument:: + + x: int = 1 + reveal_type(x) + + Running a static type checker (e.g., ``mypy``) on this example + will produce output similar to 'Revealed type is "builtins.int"'. + + At runtime, the function prints the runtime type of the + argument and returns it unchanged. + + """ + print(f"Runtime type is {type(__obj).__name__!r}", file=sys.stderr) + return __obj + + +if hasattr(typing, "assert_never"): + assert_never = typing.assert_never +else: + def assert_never(__arg: Never) -> Never: + """Assert to the type checker that a line of code is unreachable. + + Example:: + + def int_or_str(arg: int | str) -> None: + match arg: + case int(): + print("It's an int") + case str(): + print("It's a str") + case _: + assert_never(arg) + + If a type checker finds that a call to assert_never() is + reachable, it will emit an error. + + At runtime, this throws an exception when called. + + """ + raise AssertionError("Expected code to be unreachable") + + +if hasattr(typing, 'dataclass_transform'): + dataclass_transform = typing.dataclass_transform +else: + def dataclass_transform( + *, + eq_default: bool = True, + order_default: bool = False, + kw_only_default: bool = False, + field_specifiers: typing.Tuple[ + typing.Union[typing.Type[typing.Any], typing.Callable[..., typing.Any]], + ... + ] = (), + **kwargs: typing.Any, + ) -> typing.Callable[[T], T]: + """Decorator that marks a function, class, or metaclass as providing + dataclass-like behavior. + + Example: + + from typing_extensions import dataclass_transform + + _T = TypeVar("_T") + + # Used on a decorator function + @dataclass_transform() + def create_model(cls: type[_T]) -> type[_T]: + ... + return cls + + @create_model + class CustomerModel: + id: int + name: str + + # Used on a base class + @dataclass_transform() + class ModelBase: ... + + class CustomerModel(ModelBase): + id: int + name: str + + # Used on a metaclass + @dataclass_transform() + class ModelMeta(type): ... + + class ModelBase(metaclass=ModelMeta): ... + + class CustomerModel(ModelBase): + id: int + name: str + + Each of the ``CustomerModel`` classes defined in this example will now + behave similarly to a dataclass created with the ``@dataclasses.dataclass`` + decorator. For example, the type checker will synthesize an ``__init__`` + method. + + The arguments to this decorator can be used to customize this behavior: + - ``eq_default`` indicates whether the ``eq`` parameter is assumed to be + True or False if it is omitted by the caller. 
+ - ``order_default`` indicates whether the ``order`` parameter is + assumed to be True or False if it is omitted by the caller. + - ``kw_only_default`` indicates whether the ``kw_only`` parameter is + assumed to be True or False if it is omitted by the caller. + - ``field_specifiers`` specifies a static list of supported classes + or functions that describe fields, similar to ``dataclasses.field()``. + + At runtime, this decorator records its arguments in the + ``__dataclass_transform__`` attribute on the decorated object. + + See PEP 681 for details. + + """ + def decorator(cls_or_fn): + cls_or_fn.__dataclass_transform__ = { + "eq_default": eq_default, + "order_default": order_default, + "kw_only_default": kw_only_default, + "field_specifiers": field_specifiers, + "kwargs": kwargs, + } + return cls_or_fn + return decorator + + +if hasattr(typing, "override"): + override = typing.override +else: + _F = typing.TypeVar("_F", bound=typing.Callable[..., typing.Any]) + + def override(__arg: _F) -> _F: + """Indicate that a method is intended to override a method in a base class. + + Usage: + + class Base: + def method(self) -> None: ... + pass + + class Child(Base): + @override + def method(self) -> None: + super().method() + + When this decorator is applied to a method, the type checker will + validate that it overrides a method with the same name on a base class. + This helps prevent bugs that may occur when a base class is changed + without an equivalent change to a child class. + + See PEP 698 for details. + + """ + return __arg + + +# We have to do some monkey patching to deal with the dual nature of +# Unpack/TypeVarTuple: +# - We want Unpack to be a kind of TypeVar so it gets accepted in +# Generic[Unpack[Ts]] +# - We want it to *not* be treated as a TypeVar for the purposes of +# counting generic parameters, so that when we subscript a generic, +# the runtime doesn't try to substitute the Unpack with the subscripted type. +if not hasattr(typing, "TypeVarTuple"): + typing._collect_type_vars = _collect_type_vars + typing._check_generic = _check_generic + + +# Backport typing.NamedTuple as it exists in Python 3.11. +# In 3.11, the ability to define generic `NamedTuple`s was supported. +# This was explicitly disallowed in 3.9-3.10, and only half-worked in <=3.8. 
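# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the vendored upstream file): what the
# NamedTuple backport below enables on Python versions before 3.11, where
# typing.NamedTuple rejects generic named tuples. ``Pair`` is a hypothetical
# example class.
from typing import Generic, TypeVar

from typing_extensions import NamedTuple

T = TypeVar("T")

class Pair(NamedTuple, Generic[T]):
    first: T
    second: T

p = Pair(1, 2)                # behaves like a plain namedtuple at runtime
assert p.first == 1 and p.second == 2
assert Pair[int](1, 2) == p   # the generic alias is subscriptable and callable
# ---------------------------------------------------------------------------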
+if sys.version_info >= (3, 11): + NamedTuple = typing.NamedTuple +else: + def _caller(): + try: + return sys._getframe(2).f_globals.get('__name__', '__main__') + except (AttributeError, ValueError): # For platforms without _getframe() + return None + + def _make_nmtuple(name, types, module, defaults=()): + fields = [n for n, t in types] + annotations = {n: typing._type_check(t, f"field {n} annotation must be a type") + for n, t in types} + nm_tpl = collections.namedtuple(name, fields, + defaults=defaults, module=module) + nm_tpl.__annotations__ = nm_tpl.__new__.__annotations__ = annotations + # The `_field_types` attribute was removed in 3.9; + # in earlier versions, it is the same as the `__annotations__` attribute + if sys.version_info < (3, 9): + nm_tpl._field_types = annotations + return nm_tpl + + _prohibited_namedtuple_fields = typing._prohibited + _special_namedtuple_fields = frozenset({'__module__', '__name__', '__annotations__'}) + + class _NamedTupleMeta(type): + def __new__(cls, typename, bases, ns): + assert _NamedTuple in bases + for base in bases: + if base is not _NamedTuple and base is not typing.Generic: + raise TypeError( + 'can only inherit from a NamedTuple type and Generic') + bases = tuple(tuple if base is _NamedTuple else base for base in bases) + types = ns.get('__annotations__', {}) + default_names = [] + for field_name in types: + if field_name in ns: + default_names.append(field_name) + elif default_names: + raise TypeError(f"Non-default namedtuple field {field_name} " + f"cannot follow default field" + f"{'s' if len(default_names) > 1 else ''} " + f"{', '.join(default_names)}") + nm_tpl = _make_nmtuple( + typename, types.items(), + defaults=[ns[n] for n in default_names], + module=ns['__module__'] + ) + nm_tpl.__bases__ = bases + if typing.Generic in bases: + class_getitem = typing.Generic.__class_getitem__.__func__ + nm_tpl.__class_getitem__ = classmethod(class_getitem) + # update from user namespace without overriding special namedtuple attributes + for key in ns: + if key in _prohibited_namedtuple_fields: + raise AttributeError("Cannot overwrite NamedTuple attribute " + key) + elif key not in _special_namedtuple_fields and key not in nm_tpl._fields: + setattr(nm_tpl, key, ns[key]) + if typing.Generic in bases: + nm_tpl.__init_subclass__() + return nm_tpl + + def NamedTuple(__typename, __fields=None, **kwargs): + if __fields is None: + __fields = kwargs.items() + elif kwargs: + raise TypeError("Either list of fields or keywords" + " can be provided to NamedTuple, not both") + return _make_nmtuple(__typename, __fields, module=_caller()) + + NamedTuple.__doc__ = typing.NamedTuple.__doc__ + _NamedTuple = type.__new__(_NamedTupleMeta, 'NamedTuple', (), {}) + + # On 3.8+, alter the signature so that it matches typing.NamedTuple. + # The signature of typing.NamedTuple on >=3.8 is invalid syntax in Python 3.7, + # so just leave the signature as it is on 3.7. 
+ if sys.version_info >= (3, 8): + NamedTuple.__text_signature__ = '(typename, fields=None, /, **kwargs)' + + def _namedtuple_mro_entries(bases): + assert NamedTuple in bases + return (_NamedTuple,) + + NamedTuple.__mro_entries__ = _namedtuple_mro_entries diff --git a/src/poetry/core/_vendor/vendor.txt b/src/poetry/core/_vendor/vendor.txt new file mode 100644 index 0000000..a36782d --- /dev/null +++ b/src/poetry/core/_vendor/vendor.txt @@ -0,0 +1,9 @@ +attrs==22.1.0 ; python_version >= "3.7" and python_version < "4.0" +jsonschema==4.17.0 ; python_version >= "3.7" and python_version < "4.0" +lark==1.1.4 ; python_version >= "3.7" and python_version < "4.0" +packaging==21.3 ; python_version >= "3.7" and python_version < "4.0" +pkgutil-resolve-name==1.3.10 ; python_version >= "3.7" and python_version < "3.9" +pyparsing==3.0.9 ; python_version >= "3.7" and python_version < "4.0" +pyrsistent==0.19.2 ; python_version >= "3.7" and python_version < "4.0" +tomlkit==0.11.6 ; python_version >= "3.7" and python_version < "4.0" +typing-extensions==4.4.0 ; python_version >= "3.7" and python_version < "4.0" diff --git a/src/poetry/core/constraints/__init__.py b/src/poetry/core/constraints/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/constraints/generic/__init__.py b/src/poetry/core/constraints/generic/__init__.py new file mode 100644 index 0000000..ccd7f5a --- /dev/null +++ b/src/poetry/core/constraints/generic/__init__.py @@ -0,0 +1,20 @@ +from __future__ import annotations + +from poetry.core.constraints.generic.any_constraint import AnyConstraint +from poetry.core.constraints.generic.base_constraint import BaseConstraint +from poetry.core.constraints.generic.constraint import Constraint +from poetry.core.constraints.generic.empty_constraint import EmptyConstraint +from poetry.core.constraints.generic.multi_constraint import MultiConstraint +from poetry.core.constraints.generic.parser import parse_constraint +from poetry.core.constraints.generic.union_constraint import UnionConstraint + + +__all__ = [ + "AnyConstraint", + "BaseConstraint", + "Constraint", + "EmptyConstraint", + "MultiConstraint", + "UnionConstraint", + "parse_constraint", +] diff --git a/src/poetry/core/constraints/generic/any_constraint.py b/src/poetry/core/constraints/generic/any_constraint.py new file mode 100644 index 0000000..db23086 --- /dev/null +++ b/src/poetry/core/constraints/generic/any_constraint.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +from poetry.core.constraints.generic.base_constraint import BaseConstraint +from poetry.core.constraints.generic.empty_constraint import EmptyConstraint + + +class AnyConstraint(BaseConstraint): + def allows(self, other: BaseConstraint) -> bool: + return True + + def allows_all(self, other: BaseConstraint) -> bool: + return True + + def allows_any(self, other: BaseConstraint) -> bool: + return True + + def difference(self, other: BaseConstraint) -> BaseConstraint: + if other.is_any(): + return EmptyConstraint() + + raise ValueError("Unimplemented constraint difference") + + def intersect(self, other: BaseConstraint) -> BaseConstraint: + return other + + def union(self, other: BaseConstraint) -> AnyConstraint: + return AnyConstraint() + + def is_any(self) -> bool: + return True + + def is_empty(self) -> bool: + return False + + def __str__(self) -> str: + return "*" + + def __eq__(self, other: object) -> bool: + return isinstance(other, BaseConstraint) and other.is_any() + + def __hash__(self) -> int: + return hash("any") diff --git 
a/src/poetry/core/constraints/generic/base_constraint.py b/src/poetry/core/constraints/generic/base_constraint.py new file mode 100644 index 0000000..df58269 --- /dev/null +++ b/src/poetry/core/constraints/generic/base_constraint.py @@ -0,0 +1,39 @@ +from __future__ import annotations + + +class BaseConstraint: + def allows(self, other: BaseConstraint) -> bool: + raise NotImplementedError() + + def allows_all(self, other: BaseConstraint) -> bool: + raise NotImplementedError() + + def allows_any(self, other: BaseConstraint) -> bool: + raise NotImplementedError() + + def difference(self, other: BaseConstraint) -> BaseConstraint: + raise NotImplementedError() + + def intersect(self, other: BaseConstraint) -> BaseConstraint: + raise NotImplementedError() + + def union(self, other: BaseConstraint) -> BaseConstraint: + raise NotImplementedError() + + def is_any(self) -> bool: + return False + + def is_empty(self) -> bool: + return False + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {str(self)}>" + + def __str__(self) -> str: + raise NotImplementedError() + + def __hash__(self) -> int: + raise NotImplementedError() + + def __eq__(self, other: object) -> bool: + raise NotImplementedError() diff --git a/src/poetry/core/constraints/generic/constraint.py b/src/poetry/core/constraints/generic/constraint.py new file mode 100644 index 0000000..3180efb --- /dev/null +++ b/src/poetry/core/constraints/generic/constraint.py @@ -0,0 +1,137 @@ +from __future__ import annotations + +import operator + +from poetry.core.constraints.generic.any_constraint import AnyConstraint +from poetry.core.constraints.generic.base_constraint import BaseConstraint +from poetry.core.constraints.generic.empty_constraint import EmptyConstraint + + +class Constraint(BaseConstraint): + OP_EQ = operator.eq + OP_NE = operator.ne + + _trans_op_str = {"=": OP_EQ, "==": OP_EQ, "!=": OP_NE} + + _trans_op_int = {OP_EQ: "==", OP_NE: "!="} + + def __init__(self, version: str, operator: str = "==") -> None: + if operator == "=": + operator = "==" + + self._version = version + self._operator = operator + self._op = self._trans_op_str[operator] + + @property + def version(self) -> str: + return self._version + + @property + def operator(self) -> str: + return self._operator + + def allows(self, other: BaseConstraint) -> bool: + if not isinstance(other, Constraint): + raise ValueError("Unimplemented comparison of constraints") + + is_equal_op = self._operator == "==" + is_non_equal_op = self._operator == "!=" + is_other_equal_op = other.operator == "==" + is_other_non_equal_op = other.operator == "!=" + + if is_equal_op and is_other_equal_op: + return self._version == other.version + + if ( + is_equal_op + and is_other_non_equal_op + or is_non_equal_op + and is_other_equal_op + or is_non_equal_op + and is_other_non_equal_op + ): + return self._version != other.version + + return False + + def allows_all(self, other: BaseConstraint) -> bool: + if not isinstance(other, Constraint): + return other.is_empty() + + return other == self + + def allows_any(self, other: BaseConstraint) -> bool: + if isinstance(other, Constraint): + is_non_equal_op = self._operator == "!=" + is_other_non_equal_op = other.operator == "!=" + + if is_non_equal_op and is_other_non_equal_op: + return self._version != other.version + + return other.allows(self) + + def difference(self, other: BaseConstraint) -> Constraint | EmptyConstraint: + if other.allows(self): + return EmptyConstraint() + + return self + + def intersect(self, other: BaseConstraint) 
-> BaseConstraint: + from poetry.core.constraints.generic.multi_constraint import MultiConstraint + + if isinstance(other, Constraint): + if other == self: + return self + + if self.operator == "!=" and other.operator == "==" and self.allows(other): + return other + + if other.operator == "!=" and self.operator == "==" and other.allows(self): + return self + + if other.operator == "!=" and self.operator == "!=": + return MultiConstraint(self, other) + + return EmptyConstraint() + + return other.intersect(self) + + def union(self, other: BaseConstraint) -> BaseConstraint: + if isinstance(other, Constraint): + from poetry.core.constraints.generic.union_constraint import UnionConstraint + + if other == self: + return self + + if self.operator == "!=" and other.operator == "==" and self.allows(other): + return self + + if other.operator == "!=" and self.operator == "==" and other.allows(self): + return other + + if other.operator == "==" and self.operator == "==": + return UnionConstraint(self, other) + + return AnyConstraint() + + return other.union(self) + + def is_any(self) -> bool: + return False + + def is_empty(self) -> bool: + return False + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Constraint): + return NotImplemented + + return (self.version, self.operator) == (other.version, other.operator) + + def __hash__(self) -> int: + return hash((self._operator, self._version)) + + def __str__(self) -> str: + op = self._operator if self._operator != "==" else "" + return f"{op}{self._version}" diff --git a/src/poetry/core/constraints/generic/empty_constraint.py b/src/poetry/core/constraints/generic/empty_constraint.py new file mode 100644 index 0000000..83d0d14 --- /dev/null +++ b/src/poetry/core/constraints/generic/empty_constraint.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +from poetry.core.constraints.generic.base_constraint import BaseConstraint + + +class EmptyConstraint(BaseConstraint): + pretty_string = None + + def matches(self, _: BaseConstraint) -> bool: + return True + + def is_empty(self) -> bool: + return True + + def allows(self, other: BaseConstraint) -> bool: + return False + + def allows_all(self, other: BaseConstraint) -> bool: + return other.is_empty() + + def allows_any(self, other: BaseConstraint) -> bool: + return False + + def intersect(self, other: BaseConstraint) -> BaseConstraint: + return self + + def difference(self, other: BaseConstraint) -> BaseConstraint: + return self + + def __eq__(self, other: object) -> bool: + if not isinstance(other, BaseConstraint): + return False + + return other.is_empty() + + def __hash__(self) -> int: + return hash("empty") + + def __str__(self) -> str: + return "" diff --git a/src/poetry/core/constraints/generic/multi_constraint.py b/src/poetry/core/constraints/generic/multi_constraint.py new file mode 100644 index 0000000..0a1f05f --- /dev/null +++ b/src/poetry/core/constraints/generic/multi_constraint.py @@ -0,0 +1,96 @@ +from __future__ import annotations + +from poetry.core.constraints.generic.base_constraint import BaseConstraint +from poetry.core.constraints.generic.constraint import Constraint + + +class MultiConstraint(BaseConstraint): + def __init__(self, *constraints: Constraint) -> None: + if any(c.operator == "==" for c in constraints): + raise ValueError( + "A multi-constraint can only be comprised of negative constraints" + ) + + self._constraints = constraints + + @property + def constraints(self) -> tuple[Constraint, ...]: + return self._constraints + + def allows(self, other: 
BaseConstraint) -> bool: + return all(constraint.allows(other) for constraint in self._constraints) + + def allows_all(self, other: BaseConstraint) -> bool: + if other.is_any(): + return False + + if other.is_empty(): + return True + + if not isinstance(other, MultiConstraint): + return self.allows(other) + + our_constraints = iter(self._constraints) + their_constraints = iter(other.constraints) + our_constraint = next(our_constraints, None) + their_constraint = next(their_constraints, None) + + while our_constraint and their_constraint: + if our_constraint.allows_all(their_constraint): + their_constraint = next(their_constraints, None) + else: + our_constraint = next(our_constraints, None) + + return their_constraint is None + + def allows_any(self, other: BaseConstraint) -> bool: + if other.is_any(): + return True + + if other.is_empty(): + return True + + if isinstance(other, Constraint): + return self.allows(other) + + if isinstance(other, MultiConstraint): + return any( + c1.allows(c2) for c1 in self.constraints for c2 in other.constraints + ) + + return False + + def intersect(self, other: BaseConstraint) -> BaseConstraint: + if not isinstance(other, Constraint): + raise ValueError("Unimplemented constraint intersection") + + constraints = self._constraints + if other not in constraints: + constraints += (other,) + else: + constraints = (other,) + + if len(constraints) == 1: + return constraints[0] + + return MultiConstraint(*constraints) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, MultiConstraint): + return False + + return set(self._constraints) == set(other._constraints) + + def __hash__(self) -> int: + h = hash("multi") + for constraint in self._constraints: + h ^= hash(constraint) + + return h + + def __str__(self) -> str: + constraints = [] + for constraint in self._constraints: + constraints.append(str(constraint)) + + return ", ".join(constraints) diff --git a/src/poetry/core/constraints/generic/parser.py b/src/poetry/core/constraints/generic/parser.py new file mode 100644 index 0000000..abfba8e --- /dev/null +++ b/src/poetry/core/constraints/generic/parser.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +import re + +from typing import TYPE_CHECKING + +from poetry.core.constraints.generic.any_constraint import AnyConstraint +from poetry.core.constraints.generic.constraint import Constraint +from poetry.core.constraints.generic.union_constraint import UnionConstraint +from poetry.core.constraints.version.exceptions import ParseConstraintError + + +if TYPE_CHECKING: + from poetry.core.constraints.generic.base_constraint import BaseConstraint + + +BASIC_CONSTRAINT = re.compile(r"^(!?==?)?\s*([^\s]+?)\s*$") + + +def parse_constraint(constraints: str) -> BaseConstraint: + if constraints == "*": + return AnyConstraint() + + or_constraints = re.split(r"\s*\|\|?\s*", constraints.strip()) + or_groups = [] + for constraints in or_constraints: + and_constraints = re.split( + r"(?<!^)(?<![=>< ,]) *(?<!-)[, ] *(?!-)(?!,|$)", constraints + ) + constraint_objects = [] + if len(and_constraints) > 
1: + for constraint in and_constraints: + constraint_objects.append(parse_single_constraint(constraint)) + else: + constraint_objects.append(parse_single_constraint(and_constraints[0])) + + if len(constraint_objects) == 1: + constraint = constraint_objects[0] + else: + constraint = constraint_objects[0] + for next_constraint in constraint_objects[1:]: + constraint = constraint.intersect(next_constraint) + + or_groups.append(constraint) + + if len(or_groups) == 1: + return or_groups[0] + else: + return UnionConstraint(*or_groups) + + +def parse_single_constraint(constraint: str) -> Constraint: + # Basic comparator + m = BASIC_CONSTRAINT.match(constraint) + if m: + op = m.group(1) + if op is None: + op = "==" + + version = m.group(2).strip() + + return Constraint(version, op) + + raise ParseConstraintError(f"Could not parse version constraint: {constraint}") diff --git a/src/poetry/core/constraints/generic/union_constraint.py b/src/poetry/core/constraints/generic/union_constraint.py new file mode 100644 index 0000000..8db1bd8 --- /dev/null +++ b/src/poetry/core/constraints/generic/union_constraint.py @@ -0,0 +1,146 @@ +from __future__ import annotations + +from poetry.core.constraints.generic.base_constraint import BaseConstraint +from poetry.core.constraints.generic.constraint import Constraint +from poetry.core.constraints.generic.empty_constraint import EmptyConstraint +from poetry.core.constraints.generic.multi_constraint import MultiConstraint + + +class UnionConstraint(BaseConstraint): + def __init__(self, *constraints: BaseConstraint) -> None: + self._constraints = constraints + + @property + def constraints(self) -> tuple[BaseConstraint, ...]: + return self._constraints + + def allows( + self, + other: BaseConstraint, + ) -> bool: + return any(constraint.allows(other) for constraint in self._constraints) + + def allows_any(self, other: BaseConstraint) -> bool: + if other.is_empty(): + return False + + if other.is_any(): + return True + + if isinstance(other, (UnionConstraint, MultiConstraint)): + constraints = other.constraints + else: + constraints = (other,) + + return any( + our_constraint.allows_any(their_constraint) + for our_constraint in self._constraints + for their_constraint in constraints + ) + + def allows_all(self, other: BaseConstraint) -> bool: + if other.is_any(): + return False + + if other.is_empty(): + return True + + if isinstance(other, (UnionConstraint, MultiConstraint)): + constraints = other.constraints + else: + constraints = (other,) + + our_constraints = iter(self._constraints) + their_constraints = iter(constraints) + our_constraint = next(our_constraints, None) + their_constraint = next(their_constraints, None) + + while our_constraint and their_constraint: + if our_constraint.allows_all(their_constraint): + their_constraint = next(their_constraints, None) + else: + our_constraint = next(our_constraints, None) + + return their_constraint is None + + def intersect(self, other: BaseConstraint) -> BaseConstraint: + if other.is_any(): + return self + + if other.is_empty(): + return other + + if isinstance(other, Constraint): + if self.allows(other): + return other + + return EmptyConstraint() + + # Two remaining cases: an intersection with another union, or an intersection + # with a multi. 
+ # + # In the first case: + # (A or B) and (C or D) => (A and C) or (A and D) or (B and C) or (B and D) + # + # In the second case: + # (A or B) and (C and D) => (A and C and D) or (B and C and D) + new_constraints = [] + if isinstance(other, UnionConstraint): + for our_constraint in self._constraints: + for their_constraint in other.constraints: + intersection = our_constraint.intersect(their_constraint) + + if ( + not intersection.is_empty() + and intersection not in new_constraints + ): + new_constraints.append(intersection) + + else: + assert isinstance(other, MultiConstraint) + + for our_constraint in self._constraints: + intersection = our_constraint + for their_constraint in other.constraints: + intersection = intersection.intersect(their_constraint) + + if not intersection.is_empty() and intersection not in new_constraints: + new_constraints.append(intersection) + + if not new_constraints: + return EmptyConstraint() + + if len(new_constraints) == 1: + return new_constraints[0] + + return UnionConstraint(*new_constraints) + + def union(self, other: BaseConstraint) -> UnionConstraint: + if not isinstance(other, Constraint): + raise ValueError("Unimplemented constraint union") + + constraints = self._constraints + if other not in self._constraints: + constraints += (other,) + + return UnionConstraint(*constraints) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, UnionConstraint): + return False + + return set(self._constraints) == set(other._constraints) + + def __hash__(self) -> int: + h = hash("union") + for constraint in self._constraints: + h ^= hash(constraint) + + return h + + def __str__(self) -> str: + constraints = [] + for constraint in self._constraints: + constraints.append(str(constraint)) + + return " || ".join(constraints) diff --git a/src/poetry/core/constraints/version/__init__.py b/src/poetry/core/constraints/version/__init__.py new file mode 100644 index 0000000..ad85d81 --- /dev/null +++ b/src/poetry/core/constraints/version/__init__.py @@ -0,0 +1,24 @@ +from __future__ import annotations + +from poetry.core.constraints.version.empty_constraint import EmptyConstraint +from poetry.core.constraints.version.parser import parse_constraint +from poetry.core.constraints.version.util import constraint_regions +from poetry.core.constraints.version.version import Version +from poetry.core.constraints.version.version_constraint import VersionConstraint +from poetry.core.constraints.version.version_range import VersionRange +from poetry.core.constraints.version.version_range_constraint import ( + VersionRangeConstraint, +) +from poetry.core.constraints.version.version_union import VersionUnion + + +__all__ = [ + "EmptyConstraint", + "Version", + "VersionConstraint", + "VersionRange", + "VersionRangeConstraint", + "VersionUnion", + "constraint_regions", + "parse_constraint", +] diff --git a/src/poetry/core/constraints/version/empty_constraint.py b/src/poetry/core/constraints/version/empty_constraint.py new file mode 100644 index 0000000..5c9f6f5 --- /dev/null +++ b/src/poetry/core/constraints/version/empty_constraint.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from poetry.core.constraints.version.version_constraint import VersionConstraint + + +if TYPE_CHECKING: + from poetry.core.constraints.version.version import Version + from poetry.core.constraints.version.version_range_constraint import ( + VersionRangeConstraint, + ) + + +class EmptyConstraint(VersionConstraint): + def is_empty(self) -> bool: + 
return True + + def is_any(self) -> bool: + return False + + def is_simple(self) -> bool: + return True + + def allows(self, version: Version) -> bool: + return False + + def allows_all(self, other: VersionConstraint) -> bool: + return other.is_empty() + + def allows_any(self, other: VersionConstraint) -> bool: + return False + + def intersect(self, other: VersionConstraint) -> EmptyConstraint: + return self + + def union(self, other: VersionConstraint) -> VersionConstraint: + return other + + def difference(self, other: VersionConstraint) -> EmptyConstraint: + return self + + def flatten(self) -> list[VersionRangeConstraint]: + return [] + + def __str__(self) -> str: + return "" + + def __eq__(self, other: object) -> bool: + if not isinstance(other, VersionConstraint): + return False + + return other.is_empty() + + def __hash__(self) -> int: + return hash("empty") diff --git a/src/poetry/core/constraints/version/exceptions.py b/src/poetry/core/constraints/version/exceptions.py new file mode 100644 index 0000000..d06e56f --- /dev/null +++ b/src/poetry/core/constraints/version/exceptions.py @@ -0,0 +1,5 @@ +from __future__ import annotations + + +class ParseConstraintError(ValueError): + pass diff --git a/src/poetry/core/constraints/version/parser.py b/src/poetry/core/constraints/version/parser.py new file mode 100644 index 0000000..118a78c --- /dev/null +++ b/src/poetry/core/constraints/version/parser.py @@ -0,0 +1,167 @@ +from __future__ import annotations + +import re + +from typing import TYPE_CHECKING + +from poetry.core.constraints.version.exceptions import ParseConstraintError +from poetry.core.version.exceptions import InvalidVersion + + +if TYPE_CHECKING: + from poetry.core.constraints.version.version_constraint import VersionConstraint + + +def parse_constraint(constraints: str) -> VersionConstraint: + if constraints == "*": + from poetry.core.constraints.version.version_range import VersionRange + + return VersionRange() + + or_constraints = re.split(r"\s*\|\|?\s*", constraints.strip()) + or_groups = [] + for constraints in or_constraints: + # allow trailing commas for robustness (even though it may not be + # standard-compliant it seems to occur in some packages) + constraints = constraints.rstrip(",").rstrip() + and_constraints = re.split( + "(?<!^)(?<![=>< ,]) *(?<!-)[, ] *(?!-)(?!,|$)", constraints + ) + constraint_objects = [] + if len(and_constraints) > 
1: + for constraint in and_constraints: + constraint_objects.append(parse_single_constraint(constraint)) + else: + constraint_objects.append(parse_single_constraint(and_constraints[0])) + + if len(constraint_objects) == 1: + constraint = constraint_objects[0] + else: + constraint = constraint_objects[0] + for next_constraint in constraint_objects[1:]: + constraint = constraint.intersect(next_constraint) + + or_groups.append(constraint) + + if len(or_groups) == 1: + return or_groups[0] + else: + from poetry.core.constraints.version.version_union import VersionUnion + + return VersionUnion.of(*or_groups) + + +def parse_single_constraint(constraint: str) -> VersionConstraint: + from poetry.core.constraints.version.patterns import BASIC_CONSTRAINT + from poetry.core.constraints.version.patterns import CARET_CONSTRAINT + from poetry.core.constraints.version.patterns import TILDE_CONSTRAINT + from poetry.core.constraints.version.patterns import TILDE_PEP440_CONSTRAINT + from poetry.core.constraints.version.patterns import X_CONSTRAINT + from poetry.core.constraints.version.version import Version + from poetry.core.constraints.version.version_range import VersionRange + from poetry.core.constraints.version.version_union import VersionUnion + + m = re.match(r"(?i)^v?[xX*](\.[xX*])*$", constraint) + if m: + return VersionRange() + + # Tilde range + m = TILDE_CONSTRAINT.match(constraint) + if m: + try: + version = Version.parse(m.group("version")) + except InvalidVersion as e: + raise ParseConstraintError( + f"Could not parse version constraint: {constraint}" + ) from e + + high = version.stable.next_minor() + if version.release.precision == 1: + high = version.stable.next_major() + + return VersionRange(version, high, include_min=True) + + # PEP 440 Tilde range (~=) + m = TILDE_PEP440_CONSTRAINT.match(constraint) + if m: + try: + version = Version.parse(m.group("version")) + except InvalidVersion as e: + raise ParseConstraintError( + f"Could not parse version constraint: {constraint}" + ) from e + + if version.release.precision == 2: + high = version.stable.next_major() + else: + high = version.stable.next_minor() + + return VersionRange(version, high, include_min=True) + + # Caret range + m = CARET_CONSTRAINT.match(constraint) + if m: + try: + version = Version.parse(m.group("version")) + except InvalidVersion as e: + raise ParseConstraintError( + f"Could not parse version constraint: {constraint}" + ) from e + + return VersionRange(version, version.next_breaking(), include_min=True) + + # X Range + m = X_CONSTRAINT.match(constraint) + if m: + op = m.group("op") + major = int(m.group(2)) + minor = m.group(3) + + if minor is not None: + version = Version.from_parts(major, int(minor), 0) + result: VersionConstraint = VersionRange( + version, version.next_minor(), include_min=True + ) + else: + if major == 0: + result = VersionRange(max=Version.from_parts(1, 0, 0)) + else: + version = Version.from_parts(major, 0, 0) + + result = VersionRange(version, version.next_major(), include_min=True) + + if op == "!=": + result = VersionRange().difference(result) + + return result + + # Basic comparator + m = BASIC_CONSTRAINT.match(constraint) + if m: + op = m.group("op") + version_string = m.group("version") + + if version_string == "dev": + version_string = "0.0-dev" + + try: + version = Version.parse(version_string) + except InvalidVersion as e: + raise ParseConstraintError( + f"Could not parse version constraint: {constraint}" + ) from e + + if op == "<": + return VersionRange(max=version) + if op == "<=": 
+ return VersionRange(max=version, include_max=True) + if op == ">": + return VersionRange(min=version) + if op == ">=": + return VersionRange(min=version, include_min=True) + if op == "!=": + return VersionUnion(VersionRange(max=version), VersionRange(min=version)) + return version + + raise ParseConstraintError(f"Could not parse version constraint: {constraint}") diff --git a/src/poetry/core/constraints/version/patterns.py b/src/poetry/core/constraints/version/patterns.py new file mode 100644 index 0000000..0dd213c --- /dev/null +++ b/src/poetry/core/constraints/version/patterns.py @@ -0,0 +1,28 @@ +from __future__ import annotations + +import re + +from packaging.version import VERSION_PATTERN + + +COMPLETE_VERSION = re.compile(VERSION_PATTERN, re.VERBOSE | re.IGNORECASE) + +CARET_CONSTRAINT = re.compile( + rf"^\^(?P<version>{VERSION_PATTERN})$", re.VERBOSE | re.IGNORECASE +) +TILDE_CONSTRAINT = re.compile( + rf"^~(?!=)\s*(?P<version>{VERSION_PATTERN})$", re.VERBOSE | re.IGNORECASE +) +TILDE_PEP440_CONSTRAINT = re.compile( + rf"^~=\s*(?P<version>{VERSION_PATTERN})$", re.VERBOSE | re.IGNORECASE +) +X_CONSTRAINT = re.compile( + r"^(?P<op>!=|==)?\s*v?(\d+)(?:\.(\d+))?(?:\.(\d+))?(?:\.[xX*])+$" +) + +# note that we also allow technically incorrect version patterns with an asterisk (e.g. 3.5.*) +# as this is supported by pip and appears in metadata within python packages +BASIC_CONSTRAINT = re.compile( + rf"^(?P<op><>|!=|>=?|<=?|==?)?\s*(?P<version>{VERSION_PATTERN}|dev)(\.\*)?$", + re.VERBOSE | re.IGNORECASE, +) diff --git a/src/poetry/core/constraints/version/util.py b/src/poetry/core/constraints/version/util.py new file mode 100644 index 0000000..d81d11c --- /dev/null +++ b/src/poetry/core/constraints/version/util.py @@ -0,0 +1,58 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from poetry.core.constraints.version.version_range import VersionRange + + +if TYPE_CHECKING: + from poetry.core.constraints.version.version_constraint import VersionConstraint + + +def constraint_regions(constraints: list[VersionConstraint]) -> list[VersionRange]: + """ + Transform a list of VersionConstraints into a list of VersionRanges that mark out + the distinct regions of version-space. + + eg input >=3.6 and >=2.7,<3.0.0 || >=3.4.0 + output <2.7, >=2.7,<3.0.0, >=3.0.0,<3.4.0, >=3.4.0,<3.6, >=3.6. 
+ """ + flattened = [] + for constraint in constraints: + flattened += constraint.flatten() + + mins = { + (constraint.min, not constraint.include_min) + for constraint in flattened + if constraint.min is not None + } + maxs = { + (constraint.max, constraint.include_max) + for constraint in flattened + if constraint.max is not None + } + + edges = sorted(mins | maxs) + if not edges: + return [VersionRange(None, None)] + + start = edges[0] + regions = [ + VersionRange(None, start[0], include_max=start[1]), + ] + + for low, high in zip(edges, edges[1:]): + version_range = VersionRange( + low[0], + high[0], + include_min=not low[1], + include_max=high[1], + ) + regions.append(version_range) + + end = edges[-1] + regions.append( + VersionRange(end[0], None, include_min=not end[1]), + ) + + return regions diff --git a/src/poetry/core/constraints/version/version.py b/src/poetry/core/constraints/version/version.py new file mode 100644 index 0000000..faba173 --- /dev/null +++ b/src/poetry/core/constraints/version/version.py @@ -0,0 +1,181 @@ +from __future__ import annotations + +import dataclasses + +from typing import TYPE_CHECKING + +from poetry.core.constraints.version.empty_constraint import EmptyConstraint +from poetry.core.constraints.version.version_range_constraint import ( + VersionRangeConstraint, +) +from poetry.core.constraints.version.version_union import VersionUnion +from poetry.core.version.pep440 import Release +from poetry.core.version.pep440.version import PEP440Version + + +if TYPE_CHECKING: + from poetry.core.constraints.version.version_constraint import VersionConstraint + from poetry.core.version.pep440 import LocalSegmentType + from poetry.core.version.pep440 import ReleaseTag + + +@dataclasses.dataclass(frozen=True) +class Version(PEP440Version, VersionRangeConstraint): + """ + A version constraint representing a single version. 
+ """ + + @property + def precision(self) -> int: + return self.release.precision + + @property + def stable(self) -> Version: + if self.is_stable(): + return self + + post = self.post if self.pre is None else None + return Version(release=self.release, post=post, epoch=self.epoch) + + def next_breaking(self) -> Version: + if self.major > 0 or self.minor is None: + return self.stable.next_major() + + if self.minor > 0 or self.patch is None: + return self.stable.next_minor() + + return self.stable.next_patch() + + @property + def min(self) -> Version: + return self + + @property + def max(self) -> Version: + return self + + @property + def full_max(self) -> Version: + return self + + @property + def include_min(self) -> bool: + return True + + @property + def include_max(self) -> bool: + return True + + def is_any(self) -> bool: + return False + + def is_empty(self) -> bool: + return False + + def is_simple(self) -> bool: + return True + + def allows(self, version: Version | None) -> bool: + if version is None: + return False + + _this, _other = self, version + + # allow weak equality to allow `3.0.0+local.1` for `3.0.0` + if not _this.is_local() and _other.is_local(): + _other = _other.without_local() + + return _this == _other + + def allows_all(self, other: VersionConstraint) -> bool: + return other.is_empty() or ( + self.allows(other) if isinstance(other, self.__class__) else other == self + ) + + def allows_any(self, other: VersionConstraint) -> bool: + if isinstance(other, Version): + return self.allows(other) + + return other.allows(self) + + def intersect(self, other: VersionConstraint) -> Version | EmptyConstraint: + if other.allows(self): + return self + + if isinstance(other, Version) and self.allows(other): + return other + + return EmptyConstraint() + + def union(self, other: VersionConstraint) -> VersionConstraint: + from poetry.core.constraints.version.version_range import VersionRange + + if other.allows(self): + return other + + if isinstance(other, VersionRangeConstraint): + if self.allows(other.min): + return VersionRange( + other.min, + other.max, + include_min=True, + include_max=other.include_max, + ) + + if self.allows(other.max): + return VersionRange( + other.min, + other.max, + include_min=other.include_min, + include_max=True, + ) + + return VersionUnion.of(self, other) + + def difference(self, other: VersionConstraint) -> Version | EmptyConstraint: + if other.allows(self): + return EmptyConstraint() + + return self + + def flatten(self) -> list[VersionRangeConstraint]: + return [self] + + def __str__(self) -> str: + return self.text + + def __eq__(self, other: object) -> bool: + from poetry.core.constraints.version.version_range import VersionRange + + if isinstance(other, VersionRange): + return ( + self == other.min + and self == other.max + and (other.include_min or other.include_max) + ) + return super().__eq__(other) + + @classmethod + def from_parts( + cls, + major: int, + minor: int | None = None, + patch: int | None = None, + extra: int | tuple[int, ...] 
= (), + pre: ReleaseTag | None = None, + post: ReleaseTag | None = None, + dev: ReleaseTag | None = None, + local: LocalSegmentType = None, + *, + epoch: int = 0, + ) -> Version: + if isinstance(extra, int): + extra = (extra,) + return cls( + release=Release(major=major, minor=minor, patch=patch, extra=extra), + pre=pre, + post=post, + dev=dev, + local=local, + epoch=epoch, + ) diff --git a/src/poetry/core/constraints/version/version_constraint.py b/src/poetry/core/constraints/version/version_constraint.py new file mode 100644 index 0000000..cc1cda8 --- /dev/null +++ b/src/poetry/core/constraints/version/version_constraint.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +from abc import abstractmethod +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from poetry.core.constraints.version.version import Version + from poetry.core.constraints.version.version_range_constraint import ( + VersionRangeConstraint, + ) + + +class VersionConstraint: + @abstractmethod + def is_empty(self) -> bool: + raise NotImplementedError() + + @abstractmethod + def is_any(self) -> bool: + raise NotImplementedError() + + @abstractmethod + def is_simple(self) -> bool: + raise NotImplementedError() + + @abstractmethod + def allows(self, version: Version) -> bool: + raise NotImplementedError() + + @abstractmethod + def allows_all(self, other: VersionConstraint) -> bool: + raise NotImplementedError() + + @abstractmethod + def allows_any(self, other: VersionConstraint) -> bool: + raise NotImplementedError() + + @abstractmethod + def intersect(self, other: VersionConstraint) -> VersionConstraint: + raise NotImplementedError() + + @abstractmethod + def union(self, other: VersionConstraint) -> VersionConstraint: + raise NotImplementedError() + + @abstractmethod + def difference(self, other: VersionConstraint) -> VersionConstraint: + raise NotImplementedError() + + @abstractmethod + def flatten(self) -> list[VersionRangeConstraint]: + raise NotImplementedError() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {str(self)}>" + + def __str__(self) -> str: + raise NotImplementedError() + + def __hash__(self) -> int: + raise NotImplementedError() + + def __eq__(self, other: object) -> bool: + raise NotImplementedError() diff --git a/src/poetry/core/constraints/version/version_range.py b/src/poetry/core/constraints/version/version_range.py new file mode 100644 index 0000000..eb1f6e2 --- /dev/null +++ b/src/poetry/core/constraints/version/version_range.py @@ -0,0 +1,426 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from poetry.core.constraints.version.empty_constraint import EmptyConstraint +from poetry.core.constraints.version.version_range_constraint import ( + VersionRangeConstraint, +) +from poetry.core.constraints.version.version_union import VersionUnion + + +if TYPE_CHECKING: + from poetry.core.constraints.version.version import Version + from poetry.core.constraints.version.version_constraint import VersionConstraint + + +class VersionRange(VersionRangeConstraint): + def __init__( + self, + min: Version | None = None, + max: Version | None = None, + include_min: bool = False, + include_max: bool = False, + always_include_max_prerelease: bool = False, + ) -> None: + full_max = max + if ( + not always_include_max_prerelease + and not include_max + and full_max is not None + and full_max.is_stable() + and not full_max.is_postrelease() + and (min is None or min.is_stable() or min.release != full_max.release) + ): + full_max = full_max.first_prerelease() + 
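+ # Editorial note, not in upstream: for a bare upper bound such as "<2.0.0" the
+ # branch above lowers full_max to the bound's first prerelease (e.g. 2.0.0a0),
+ # so allows() also rejects prereleases like 2.0.0rc1; constructing the range with
+ # always_include_max_prerelease=True keeps full_max at 2.0.0 and admits them.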
+ self._min = min + self._max = max + self._full_max = full_max + self._include_min = include_min + self._include_max = include_max + + @property + def min(self) -> Version | None: + return self._min + + @property + def max(self) -> Version | None: + return self._max + + @property + def full_max(self) -> Version | None: + return self._full_max + + @property + def include_min(self) -> bool: + return self._include_min + + @property + def include_max(self) -> bool: + return self._include_max + + def is_empty(self) -> bool: + return False + + def is_any(self) -> bool: + return self._min is None and self._max is None + + def is_simple(self) -> bool: + return self._min is None or self._max is None + + def allows(self, other: Version) -> bool: + if self._min is not None: + if other < self._min: + return False + + if not self._include_min and other == self._min: + return False + + if self.full_max is not None: + _this, _other = self.full_max, other + + if not _this.is_local() and _other.is_local(): + # allow weak equality to allow `3.0.0+local.1` for `<=3.0.0` + _other = _other.without_local() + + if not _this.is_postrelease() and _other.is_postrelease(): + # allow weak equality to allow `3.0.0-1` for `<=3.0.0` + _other = _other.without_postrelease() + + if _other > _this: + return False + + if not self._include_max and _other == _this: + return False + + return True + + def allows_all(self, other: VersionConstraint) -> bool: + from poetry.core.constraints.version.version import Version + + if other.is_empty(): + return True + + if isinstance(other, Version): + return self.allows(other) + + if isinstance(other, VersionUnion): + return all([self.allows_all(constraint) for constraint in other.ranges]) + + if isinstance(other, VersionRangeConstraint): + return not other.allows_lower(self) and not other.allows_higher(self) + + raise ValueError(f"Unknown VersionConstraint type {other}.") + + def allows_any(self, other: VersionConstraint) -> bool: + from poetry.core.constraints.version.version import Version + + if other.is_empty(): + return False + + if isinstance(other, Version): + return self.allows(other) + + if isinstance(other, VersionUnion): + return any([self.allows_any(constraint) for constraint in other.ranges]) + + if isinstance(other, VersionRangeConstraint): + return not other.is_strictly_lower(self) and not other.is_strictly_higher( + self + ) + + raise ValueError(f"Unknown VersionConstraint type {other}.") + + def intersect(self, other: VersionConstraint) -> VersionConstraint: + from poetry.core.constraints.version.version import Version + + if other.is_empty(): + return other + + if isinstance(other, VersionUnion): + return other.intersect(self) + + # A range and a Version just yields the version if it's in the range. 
+ if isinstance(other, Version): + if self.allows(other): + return other + + return EmptyConstraint() + + if not isinstance(other, VersionRangeConstraint): + raise ValueError(f"Unknown VersionConstraint type {other}.") + + if self.allows_lower(other): + if self.is_strictly_lower(other): + return EmptyConstraint() + + intersect_min = other.min + intersect_include_min = other.include_min + else: + if other.is_strictly_lower(self): + return EmptyConstraint() + + intersect_min = self._min + intersect_include_min = self._include_min + + if self.allows_higher(other): + intersect_max = other.max + intersect_include_max = other.include_max + else: + intersect_max = self._max + intersect_include_max = self._include_max + + if intersect_min is None and intersect_max is None: + return VersionRange() + + # If the range is just a single version. + if intersect_min == intersect_max: + # Because we already verified that the lower range isn't strictly + # lower, there must be some overlap. + assert intersect_include_min and intersect_include_max + assert intersect_min is not None + + return intersect_min + + # If we got here, there is an actual range. + return VersionRange( + intersect_min, intersect_max, intersect_include_min, intersect_include_max + ) + + def union(self, other: VersionConstraint) -> VersionConstraint: + from poetry.core.constraints.version.version import Version + + if isinstance(other, Version): + if self.allows(other): + return self + + if other == self.min: + return VersionRange( + self.min, self.max, include_min=True, include_max=self.include_max + ) + + if other == self.max: + return VersionRange( + self.min, self.max, include_min=self.include_min, include_max=True + ) + + return VersionUnion.of(self, other) + + if isinstance(other, VersionRangeConstraint): + # If the two ranges don't overlap, we won't be able to create a single + # VersionRange for both of them. 
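+ # Editorial example, not in upstream: (>=1.0,<2.0).union(>=3.0) keeps both parts
+ # as a VersionUnion, whereas (>=1.0,<2.0).union(>=2.0,<3.0) touches at 2.0 and
+ # collapses into the single range >=1.0,<3.0.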
+ edges_touch = ( + self.max == other.min and (self.include_max or other.include_min) + ) or (self.min == other.max and (self.include_min or other.include_max)) + + if not edges_touch and not self.allows_any(other): + return VersionUnion.of(self, other) + + if self.allows_lower(other): + union_min = self.min + union_include_min = self.include_min + else: + union_min = other.min + union_include_min = other.include_min + + if self.allows_higher(other): + union_max = self.max + union_include_max = self.include_max + else: + union_max = other.max + union_include_max = other.include_max + + return VersionRange( + union_min, + union_max, + include_min=union_include_min, + include_max=union_include_max, + ) + + return VersionUnion.of(self, other) + + def difference(self, other: VersionConstraint) -> VersionConstraint: + from poetry.core.constraints.version.version import Version + + if other.is_empty(): + return self + + if isinstance(other, Version): + if not self.allows(other): + return self + + if other == self.min: + if not self.include_min: + return self + + return VersionRange(self.min, self.max, False, self.include_max) + + if other == self.max: + if not self.include_max: + return self + + return VersionRange(self.min, self.max, self.include_min, False) + + return VersionUnion.of( + VersionRange(self.min, other, self.include_min, False), + VersionRange(other, self.max, False, self.include_max), + ) + elif isinstance(other, VersionRangeConstraint): + if not self.allows_any(other): + return self + + before: VersionConstraint | None + if not self.allows_lower(other): + before = None + elif self.min == other.min: + before = self.min + else: + before = VersionRange( + self.min, other.min, self.include_min, not other.include_min + ) + + after: VersionConstraint | None + if not self.allows_higher(other): + after = None + elif self.max == other.max: + after = self.max + else: + after = VersionRange( + other.max, self.max, not other.include_max, self.include_max + ) + + if before is None and after is None: + return EmptyConstraint() + + if before is None: + assert after is not None + return after + + if after is None: + return before + + return VersionUnion.of(before, after) + elif isinstance(other, VersionUnion): + ranges: list[VersionRangeConstraint] = [] + current: VersionRangeConstraint = self + + for range in other.ranges: + # Skip any ranges that are strictly lower than [current]. + if range.is_strictly_lower(current): + continue + + # If we reach a range strictly higher than [current], no more ranges + # will be relevant so we can bail early. + if range.is_strictly_higher(current): + break + + difference = current.difference(range) + if difference.is_empty(): + return EmptyConstraint() + elif isinstance(difference, VersionUnion): + # If [range] split [current] in half, we only need to continue + # checking future ranges against the latter half. 
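+ # Editorial example, not in upstream: if current is >=1.0,<4.0 and range is
+ # >=2.0,<3.0, the difference is >=1.0,<2.0 || >=3.0,<4.0; the lower half is
+ # final, and only >=3.0,<4.0 can still intersect the remaining (sorted) ranges.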
+ ranges.append(difference.ranges[0]) + current = difference.ranges[-1] + else: + assert isinstance(difference, VersionRangeConstraint) + current = difference + + if not ranges: + return current + + return VersionUnion.of(*(ranges + [current])) + + raise ValueError(f"Unknown VersionConstraint type {other}.") + + def flatten(self) -> list[VersionRangeConstraint]: + return [self] + + def __eq__(self, other: object) -> bool: + if not isinstance(other, VersionRangeConstraint): + return False + + return ( + self._min == other.min + and self._max == other.max + and self._include_min == other.include_min + and self._include_max == other.include_max + ) + + def __lt__(self, other: VersionRangeConstraint) -> bool: + return self._cmp(other) < 0 + + def __le__(self, other: VersionRangeConstraint) -> bool: + return self._cmp(other) <= 0 + + def __gt__(self, other: VersionRangeConstraint) -> bool: + return self._cmp(other) > 0 + + def __ge__(self, other: VersionRangeConstraint) -> bool: + return self._cmp(other) >= 0 + + def _cmp(self, other: VersionRangeConstraint) -> int: + if self.min is None: + if other.min is None: + return self._compare_max(other) + + return -1 + elif other.min is None: + return 1 + + if self.min > other.min: + return 1 + elif self.min < other.min: + return -1 + + if self.include_min != other.include_min: + return -1 if self.include_min else 1 + + return self._compare_max(other) + + def _compare_max(self, other: VersionRangeConstraint) -> int: + if self.max is None: + if other.max is None: + return 0 + + return 1 + elif other.max is None: + return -1 + + if self.max > other.max: + return 1 + elif self.max < other.max: + return -1 + + if self.include_max != other.include_max: + return 1 if self.include_max else -1 + + return 0 + + def __str__(self) -> str: + text = "" + + if self.min is not None: + text += ">=" if self.include_min else ">" + text += self.min.text + + if self.max is not None: + if self.min is not None: + text += "," + + op = "<=" if self.include_max else "<" + text += f"{op}{self.max.text}" + + if self.min is None and self.max is None: + return "*" + + return text + + def __hash__(self) -> int: + return ( + hash(self.min) + ^ hash(self.max) + ^ hash(self.include_min) + ^ hash(self.include_max) + ) diff --git a/src/poetry/core/constraints/version/version_range_constraint.py b/src/poetry/core/constraints/version/version_range_constraint.py new file mode 100644 index 0000000..a777627 --- /dev/null +++ b/src/poetry/core/constraints/version/version_range_constraint.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +from abc import abstractmethod +from typing import TYPE_CHECKING + +from poetry.core.constraints.version.version_constraint import VersionConstraint + + +if TYPE_CHECKING: + from poetry.core.constraints.version.version import Version + + +class VersionRangeConstraint(VersionConstraint): + @property + @abstractmethod + def min(self) -> Version | None: + raise NotImplementedError() + + @property + @abstractmethod + def max(self) -> Version | None: + raise NotImplementedError() + + @property + @abstractmethod + def full_max(self) -> Version | None: + raise NotImplementedError() + + @property + @abstractmethod + def include_min(self) -> bool: + raise NotImplementedError() + + @property + @abstractmethod + def include_max(self) -> bool: + raise NotImplementedError() + + def allows_lower(self, other: VersionRangeConstraint) -> bool: + if self.min is None: + return other.min is not None + + if other.min is None: + return False + + if self.min < other.min: + 
return True + + if self.min > other.min: + return False + + return self.include_min and not other.include_min + + def allows_higher(self, other: VersionRangeConstraint) -> bool: + if self.full_max is None: + return other.max is not None + + if other.full_max is None: + return False + + if self.full_max < other.full_max: + return False + + if self.full_max > other.full_max: + return True + + return self.include_max and not other.include_max + + def is_strictly_lower(self, other: VersionRangeConstraint) -> bool: + if self.full_max is None or other.min is None: + return False + + if self.full_max < other.min: + return True + + if self.full_max > other.min: + return False + + return not self.include_max or not other.include_min + + def is_strictly_higher(self, other: VersionRangeConstraint) -> bool: + return other.is_strictly_lower(self) + + def is_adjacent_to(self, other: VersionRangeConstraint) -> bool: + if self.max != other.min: + return False + + return ( + self.include_max + and not other.include_min + or not self.include_max + and other.include_min + ) diff --git a/src/poetry/core/constraints/version/version_union.py b/src/poetry/core/constraints/version/version_union.py new file mode 100644 index 0000000..57ca178 --- /dev/null +++ b/src/poetry/core/constraints/version/version_union.py @@ -0,0 +1,422 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from poetry.core.constraints.version.empty_constraint import EmptyConstraint +from poetry.core.constraints.version.version_constraint import VersionConstraint +from poetry.core.constraints.version.version_range_constraint import ( + VersionRangeConstraint, +) + + +if TYPE_CHECKING: + from poetry.core.constraints.version.version import Version + + +class VersionUnion(VersionConstraint): + """ + A version constraint representing a union of multiple disjoint version + ranges. + + An instance of this will only be created if the version can't be represented + as a non-compound value. + """ + + def __init__(self, *ranges: VersionRangeConstraint) -> None: + self._ranges = list(ranges) + + @property + def ranges(self) -> list[VersionRangeConstraint]: + return self._ranges + + @classmethod + def of(cls, *ranges: VersionConstraint) -> VersionConstraint: + from poetry.core.constraints.version.version_range import VersionRange + + flattened: list[VersionRangeConstraint] = [] + for constraint in ranges: + if constraint.is_empty(): + continue + + if isinstance(constraint, VersionUnion): + flattened += constraint.ranges + continue + + assert isinstance(constraint, VersionRangeConstraint) + flattened.append(constraint) + + if not flattened: + return EmptyConstraint() + + if any([constraint.is_any() for constraint in flattened]): + return VersionRange() + + # Only allow Versions and VersionRanges here so we can more easily reason + # about everything in flattened. _EmptyVersions and VersionUnions are + # filtered out above. + for constraint in flattened: + if not isinstance(constraint, VersionRangeConstraint): + raise ValueError(f"Unknown VersionConstraint type {constraint}.") + + flattened.sort() + + merged: list[VersionRangeConstraint] = [] + for constraint in flattened: + # Merge this constraint with the previous one, but only if they touch. 
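+ # Editorial example, not in upstream: after sorting, >=1.0,<2.0 and >=2.0,<3.0
+ # are adjacent and merge into >=1.0,<3.0, while >=1.0,<2.0 and >=3.0 remain
+ # separate ranges of the resulting VersionUnion.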
+ if not merged or ( + not merged[-1].allows_any(constraint) + and not merged[-1].is_adjacent_to(constraint) + ): + merged.append(constraint) + else: + new_constraint = merged[-1].union(constraint) + assert isinstance(new_constraint, VersionRangeConstraint) + merged[-1] = new_constraint + + if len(merged) == 1: + return merged[0] + + return VersionUnion(*merged) + + def is_empty(self) -> bool: + return False + + def is_any(self) -> bool: + return False + + def is_simple(self) -> bool: + return self.excludes_single_version() + + def allows(self, version: Version) -> bool: + return any([constraint.allows(version) for constraint in self._ranges]) + + def allows_all(self, other: VersionConstraint) -> bool: + our_ranges = iter(self._ranges) + their_ranges = iter(other.flatten()) + + our_current_range = next(our_ranges, None) + their_current_range = next(their_ranges, None) + + while our_current_range and their_current_range: + if our_current_range.allows_all(their_current_range): + their_current_range = next(their_ranges, None) + else: + our_current_range = next(our_ranges, None) + + return their_current_range is None + + def allows_any(self, other: VersionConstraint) -> bool: + our_ranges = iter(self._ranges) + their_ranges = iter(other.flatten()) + + our_current_range = next(our_ranges, None) + their_current_range = next(their_ranges, None) + + while our_current_range and their_current_range: + if our_current_range.allows_any(their_current_range): + return True + + if their_current_range.allows_higher(our_current_range): + our_current_range = next(our_ranges, None) + else: + their_current_range = next(their_ranges, None) + + return False + + def intersect(self, other: VersionConstraint) -> VersionConstraint: + our_ranges = iter(self._ranges) + their_ranges = iter(other.flatten()) + new_ranges = [] + + our_current_range = next(our_ranges, None) + their_current_range = next(their_ranges, None) + + while our_current_range and their_current_range: + intersection = our_current_range.intersect(their_current_range) + + if not intersection.is_empty(): + new_ranges.append(intersection) + + if their_current_range.allows_higher(our_current_range): + our_current_range = next(our_ranges, None) + else: + their_current_range = next(their_ranges, None) + + return VersionUnion.of(*new_ranges) + + def union(self, other: VersionConstraint) -> VersionConstraint: + return VersionUnion.of(self, other) + + def difference(self, other: VersionConstraint) -> VersionConstraint: + our_ranges = iter(self._ranges) + their_ranges = iter(other.flatten()) + new_ranges: list[VersionConstraint] = [] + + state = { + "current": next(our_ranges, None), + "their_range": next(their_ranges, None), + } + + def their_next_range() -> bool: + state["their_range"] = next(their_ranges, None) + if state["their_range"]: + return True + + assert state["current"] is not None + new_ranges.append(state["current"]) + our_current = next(our_ranges, None) + while our_current: + new_ranges.append(our_current) + our_current = next(our_ranges, None) + + return False + + def our_next_range(include_current: bool = True) -> bool: + if include_current: + assert state["current"] is not None + new_ranges.append(state["current"]) + + our_current = next(our_ranges, None) + if not our_current: + return False + + state["current"] = our_current + + return True + + while True: + if state["their_range"] is None: + break + + assert state["current"] is not None + if state["their_range"].is_strictly_lower(state["current"]): + if not their_next_range(): + break + + 
continue + + if state["their_range"].is_strictly_higher(state["current"]): + if not our_next_range(): + break + + continue + + difference = state["current"].difference(state["their_range"]) + if isinstance(difference, VersionUnion): + assert len(difference.ranges) == 2 + new_ranges.append(difference.ranges[0]) + state["current"] = difference.ranges[-1] + + if not their_next_range(): + break + elif difference.is_empty(): + if not our_next_range(False): + break + else: + assert isinstance(difference, VersionRangeConstraint) + state["current"] = difference + + if state["current"].allows_higher(state["their_range"]): + if not their_next_range(): + break + else: + if not our_next_range(): + break + + if not new_ranges: + return EmptyConstraint() + + if len(new_ranges) == 1: + return new_ranges[0] + + return VersionUnion.of(*new_ranges) + + def flatten(self) -> list[VersionRangeConstraint]: + return self.ranges + + def _exclude_single_wildcard_range_string(self) -> str: + """ + Helper method to convert this instance into a wild card range + string. + """ + if not self.excludes_single_wildcard_range(): + raise ValueError("Not a valid wildcard range") + + # we assume here that since it is a single exclusion range + # that it is one of "< 2.0.0 || >= 2.1.0" or ">= 2.1.0 || < 2.0.0" + # and the one with the max is the first part + idx_order = (0, 1) if self._ranges[0].max else (1, 0) + one = self._ranges[idx_order[0]].max + assert one is not None + two = self._ranges[idx_order[1]].min + assert two is not None + + # versions can have both semver and non semver parts + parts_one = [ + one.major, + one.minor or 0, + one.patch or 0, + *list(one.non_semver_parts or []), + ] + parts_two = [ + two.major, + two.minor or 0, + two.patch or 0, + *list(two.non_semver_parts or []), + ] + + # we assume here that a wildcard range implies that the part following the + # first part that is different in the second range is the wildcard, this means + # that multiple wildcards are not supported right now. + parts = [] + + for idx, part in enumerate(parts_one): + parts.append(str(part)) + if parts_two[idx] != part: + # since this part is different the next one is the wildcard + # for example, "< 2.0.0 || >= 2.1.0" gets us a wildcard range + # 2.0.* + parts.append("*") + break + else: + # we should not ever get here, however it is likely that poorly + # constructed metadata exists + raise ValueError("Not a valid wildcard range") + + return f"!={'.'.join(parts)}" + + @staticmethod + def _excludes_single_wildcard_range_check_is_valid_range( + one: VersionRangeConstraint, two: VersionRangeConstraint + ) -> bool: + """ + Helper method to determine if two versions define a single wildcard range. + + In cases where !=2.0.* was parsed by us, the union is of the range + <2.0.0 || >=2.1.0. In user defined ranges, precision might be different. + For example, a union <2.0 || >= 2.1.0 is still !=2.0.*. In order to + handle these cases we make sure that if precisions do not match, extra + checks are performed to validate that the constraint is a valid single + wildcard range. + """ + + assert one.max is not None + assert two.min is not None + + max_precision = max(one.max.precision, two.min.precision) + + if max_precision <= 3: + # In cases where both versions have a precision less than 3, + # we can make use of the next major/minor/patch versions. 
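+ # Editorial example, not in upstream: for "<2.0 || >=2.1", one.max.next_minor()
+ # is 2.1, which equals two.min, so the pair is accepted as the single wildcard
+ # exclusion !=2.0.*.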
+ return two.min in { + one.max.next_major(), + one.max.next_minor(), + one.max.next_patch(), + } + else: + # When there are non-semver parts in one of the versions, we need to + # ensure we use zero padded version and in addition to next major/minor/ + # patch versions, also check each next release for the extra parts. + from_parts = one.max.__class__.from_parts + + _extras: list[list[int]] = [] + _versions: list[Version] = [] + + for _version in [one.max, two.min]: + _extra = list(_version.non_semver_parts or []) + + while len(_extra) < (max_precision - 3): + # pad zeros for extra parts to ensure precisions are equal + _extra.append(0) + + # create a new release with unspecified parts padded with zeros + _padded_version: Version = from_parts( + major=_version.major, + minor=_version.minor or 0, + patch=_version.patch or 0, + extra=tuple(_extra), + ) + + _extras.append(_extra) + _versions.append(_padded_version) + + _extra_one = _extras[0] + _padded_version_one = _versions[0] + _padded_version_two = _versions[1] + + _check_versions = { + _padded_version_one.next_major(), + _padded_version_one.next_minor(), + _padded_version_one.next_patch(), + } + + # for each non-semver (extra) part, bump a version + for idx in range(len(_extra_one)): + _extra = [ + *_extra_one[: idx - 1], + (_extra_one[idx] + 1), + *_extra_one[idx + 1 :], + ] + _check_versions.add( + from_parts( + _padded_version_one.major, + _padded_version_one.minor, + _padded_version_one.patch, + tuple(_extra), + ) + ) + + return _padded_version_two in _check_versions + + def excludes_single_wildcard_range(self) -> bool: + from poetry.core.constraints.version.version_range import VersionRange + + if len(self._ranges) != 2: + return False + + idx_order = (0, 1) if self._ranges[0].max else (1, 0) + one = self._ranges[idx_order[0]] + two = self._ranges[idx_order[1]] + + is_range_exclusion = ( + one.max and not one.include_max and two.min and two.include_min + ) + + if not is_range_exclusion: + return False + + if not self._excludes_single_wildcard_range_check_is_valid_range(one, two): + return False + + return isinstance(VersionRange().difference(self), VersionRange) + + def excludes_single_version(self) -> bool: + from poetry.core.constraints.version.version import Version + from poetry.core.constraints.version.version_range import VersionRange + + return isinstance(VersionRange().difference(self), Version) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, VersionUnion): + return False + + return self._ranges == other.ranges + + def __hash__(self) -> int: + h = hash(self._ranges[0]) + + for range in self._ranges[1:]: + h ^= hash(range) + + return h + + def __str__(self) -> str: + from poetry.core.constraints.version.version_range import VersionRange + + if self.excludes_single_version(): + return f"!={VersionRange().difference(self)}" + + try: + return self._exclude_single_wildcard_range_string() + except ValueError: + return " || ".join([str(r) for r in self._ranges]) diff --git a/src/poetry/core/exceptions/__init__.py b/src/poetry/core/exceptions/__init__.py new file mode 100644 index 0000000..d96ee12 --- /dev/null +++ b/src/poetry/core/exceptions/__init__.py @@ -0,0 +1,6 @@ +from __future__ import annotations + +from poetry.core.exceptions.base import PoetryCoreException + + +__all__ = ["PoetryCoreException"] diff --git a/src/poetry/core/exceptions/base.py b/src/poetry/core/exceptions/base.py new file mode 100644 index 0000000..4372762 --- /dev/null +++ b/src/poetry/core/exceptions/base.py @@ -0,0 +1,5 @@ +from 
__future__ import annotations + + +class PoetryCoreException(Exception): + pass diff --git a/src/poetry/core/factory.py b/src/poetry/core/factory.py new file mode 100644 index 0000000..1d56d78 --- /dev/null +++ b/src/poetry/core/factory.py @@ -0,0 +1,455 @@ +from __future__ import annotations + +import logging + +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Any +from typing import Dict +from typing import List +from typing import Mapping +from typing import Union +from warnings import warn + +from packaging.utils import canonicalize_name + +from poetry.core.utils.helpers import combine_unicode +from poetry.core.utils.helpers import readme_content_type + + +if TYPE_CHECKING: + from poetry.core.packages.dependency import Dependency + from poetry.core.packages.dependency_group import DependencyGroup + from poetry.core.packages.project_package import ProjectPackage + from poetry.core.poetry import Poetry + from poetry.core.spdx.license import License + + DependencyConstraint = Union[str, Dict[str, Any]] + DependencyConfig = Mapping[ + str, Union[List[DependencyConstraint], DependencyConstraint] + ] + + +logger = logging.getLogger(__name__) + + +class Factory: + """ + Factory class to create various elements needed by Poetry. + """ + + def create_poetry( + self, cwd: Path | None = None, with_groups: bool = True + ) -> Poetry: + from poetry.core.poetry import Poetry + from poetry.core.pyproject.toml import PyProjectTOML + + poetry_file = self.locate(cwd) + local_config = PyProjectTOML(path=poetry_file).poetry_config + + # Checking validity + check_result = self.validate(local_config) + if check_result["errors"]: + message = "" + for error in check_result["errors"]: + message += f" - {error}\n" + + raise RuntimeError("The Poetry configuration is invalid:\n" + message) + + # Load package + name = local_config["name"] + assert isinstance(name, str) + version = local_config["version"] + assert isinstance(version, str) + package = self.get_package(name, version) + package = self.configure_package( + package, local_config, poetry_file.parent, with_groups=with_groups + ) + + return Poetry(poetry_file, local_config, package) + + @classmethod + def get_package(cls, name: str, version: str) -> ProjectPackage: + from poetry.core.packages.project_package import ProjectPackage + + return ProjectPackage(name, version, version) + + @classmethod + def _add_package_group_dependencies( + cls, + package: ProjectPackage, + group: str | DependencyGroup, + dependencies: DependencyConfig, + ) -> None: + from poetry.core.packages.dependency_group import MAIN_GROUP + + if isinstance(group, str): + if package.has_dependency_group(group): + group = package.dependency_group(group) + else: + from poetry.core.packages.dependency_group import DependencyGroup + + group = DependencyGroup(group) + + for name, constraints in dependencies.items(): + _constraints = ( + constraints if isinstance(constraints, list) else [constraints] + ) + for _constraint in _constraints: + if name.lower() == "python": + if group.name == MAIN_GROUP and isinstance(_constraint, str): + package.python_versions = _constraint + continue + + group.add_dependency( + cls.create_dependency( + name, + _constraint, + groups=[group.name], + root_dir=package.root_dir, + ) + ) + + package.add_dependency_group(group) + + @classmethod + def configure_package( + cls, + package: ProjectPackage, + config: dict[str, Any], + root: Path, + with_groups: bool = True, + ) -> ProjectPackage: + from poetry.core.packages.dependency import 
Dependency + from poetry.core.packages.dependency_group import MAIN_GROUP + from poetry.core.packages.dependency_group import DependencyGroup + from poetry.core.spdx.helpers import license_by_id + + package.root_dir = root + + for author in config["authors"]: + package.authors.append(combine_unicode(author)) + + for maintainer in config.get("maintainers", []): + package.maintainers.append(combine_unicode(maintainer)) + + package.description = config.get("description", "") + package.homepage = config.get("homepage") + package.repository_url = config.get("repository") + package.documentation_url = config.get("documentation") + try: + license_: License | None = license_by_id(config.get("license", "")) + except ValueError: + license_ = None + + package.license = license_ + package.keywords = config.get("keywords", []) + package.classifiers = config.get("classifiers", []) + + if "readme" in config: + if isinstance(config["readme"], str): + package.readmes = (root / config["readme"],) + else: + package.readmes = tuple(root / readme for readme in config["readme"]) + + if "platform" in config: + package.platform = config["platform"] + + if "dependencies" in config: + cls._add_package_group_dependencies( + package=package, group=MAIN_GROUP, dependencies=config["dependencies"] + ) + + if with_groups and "group" in config: + for group_name, group_config in config["group"].items(): + group = DependencyGroup( + group_name, optional=group_config.get("optional", False) + ) + cls._add_package_group_dependencies( + package=package, + group=group, + dependencies=group_config["dependencies"], + ) + + if with_groups and "dev-dependencies" in config: + cls._add_package_group_dependencies( + package=package, group="dev", dependencies=config["dev-dependencies"] + ) + + extras = config.get("extras", {}) + for extra_name, requirements in extras.items(): + extra_name = canonicalize_name(extra_name) + package.extras[extra_name] = [] + + # Checking for dependency + for req in requirements: + req = Dependency(req, "*") + + for dep in package.requires: + if dep.name == req.name: + dep.in_extras.append(extra_name) + package.extras[extra_name].append(dep) + + if "build" in config: + build = config["build"] + if not isinstance(build, dict): + build = {"script": build} + package.build_config = build or {} + + if "include" in config: + package.include = [] + + for include in config["include"]: + if not isinstance(include, dict): + include = {"path": include} + + formats = include.get("format", []) + if formats and not isinstance(formats, list): + formats = [formats] + include["format"] = formats + + package.include.append(include) + + if "exclude" in config: + package.exclude = config["exclude"] + + if "packages" in config: + package.packages = config["packages"] + + # Custom urls + if "urls" in config: + package.custom_urls = config["urls"] + + return package + + @classmethod + def create_dependency( + cls, + name: str, + constraint: DependencyConstraint, + groups: list[str] | None = None, + root_dir: Path | None = None, + ) -> Dependency: + from poetry.core.constraints.generic import ( + parse_constraint as parse_generic_constraint, + ) + from poetry.core.constraints.version import ( + parse_constraint as parse_version_constraint, + ) + from poetry.core.packages.dependency import Dependency + from poetry.core.packages.dependency_group import MAIN_GROUP + from poetry.core.packages.directory_dependency import DirectoryDependency + from poetry.core.packages.file_dependency import FileDependency + from 
poetry.core.packages.url_dependency import URLDependency + from poetry.core.packages.utils.utils import create_nested_marker + from poetry.core.packages.vcs_dependency import VCSDependency + from poetry.core.version.markers import AnyMarker + from poetry.core.version.markers import parse_marker + + if groups is None: + groups = [MAIN_GROUP] + + if constraint is None: + constraint = "*" + + if isinstance(constraint, dict): + optional = constraint.get("optional", False) + python_versions = constraint.get("python") + platform = constraint.get("platform") + markers = constraint.get("markers") + if "allows-prereleases" in constraint: + message = ( + f'The "{name}" dependency specifies ' + 'the "allows-prereleases" property, which is deprecated. ' + 'Use "allow-prereleases" instead.' + ) + warn(message, DeprecationWarning) + logger.warning(message) + + allows_prereleases = constraint.get( + "allow-prereleases", constraint.get("allows-prereleases", False) + ) + + dependency: Dependency + if "git" in constraint: + # VCS dependency + dependency = VCSDependency( + name, + "git", + constraint["git"], + branch=constraint.get("branch", None), + tag=constraint.get("tag", None), + rev=constraint.get("rev", None), + directory=constraint.get("subdirectory", None), + groups=groups, + optional=optional, + develop=constraint.get("develop", False), + extras=constraint.get("extras", []), + ) + elif "file" in constraint: + file_path = Path(constraint["file"]) + + dependency = FileDependency( + name, + file_path, + groups=groups, + base=root_dir, + extras=constraint.get("extras", []), + ) + elif "path" in constraint: + path = Path(constraint["path"]) + + if root_dir: + is_file = root_dir.joinpath(path).is_file() + else: + is_file = path.is_file() + + if is_file: + dependency = FileDependency( + name, + path, + groups=groups, + optional=optional, + base=root_dir, + extras=constraint.get("extras", []), + ) + else: + dependency = DirectoryDependency( + name, + path, + groups=groups, + optional=optional, + base=root_dir, + develop=constraint.get("develop", False), + extras=constraint.get("extras", []), + ) + elif "url" in constraint: + dependency = URLDependency( + name, + constraint["url"], + directory=constraint.get("subdirectory", None), + groups=groups, + optional=optional, + extras=constraint.get("extras", []), + ) + else: + version = constraint["version"] + + dependency = Dependency( + name, + version, + optional=optional, + groups=groups, + allows_prereleases=allows_prereleases, + extras=constraint.get("extras", []), + ) + + marker = parse_marker(markers) if markers else AnyMarker() + + if python_versions: + marker = marker.intersect( + parse_marker( + create_nested_marker( + "python_version", parse_version_constraint(python_versions) + ) + ) + ) + + if platform: + marker = marker.intersect( + parse_marker( + create_nested_marker( + "sys_platform", parse_generic_constraint(platform) + ) + ) + ) + + if not marker.is_any(): + dependency.marker = marker + + dependency.source_name = constraint.get("source") + else: + dependency = Dependency(name, constraint, groups=groups) + + return dependency + + @classmethod + def validate( + cls, config: dict[str, Any], strict: bool = False + ) -> dict[str, list[str]]: + """ + Checks the validity of a configuration + """ + from poetry.core.json import validate_object + + result: dict[str, list[str]] = {"errors": [], "warnings": []} + # Schema validation errors + validation_errors = validate_object(config, "poetry-schema") + + result["errors"] += validation_errors + + if 
strict: + # If strict, check the file more thoroughly + if "dependencies" in config: + python_versions = config["dependencies"]["python"] + if python_versions == "*": + result["warnings"].append( + "A wildcard Python dependency is ambiguous. " + "Consider specifying a more explicit one." + ) + + for name, constraint in config["dependencies"].items(): + if not isinstance(constraint, dict): + continue + + if "allows-prereleases" in constraint: + result["warnings"].append( + f'The "{name}" dependency specifies ' + 'the "allows-prereleases" property, which is deprecated. ' + 'Use "allow-prereleases" instead.' + ) + + # Checking for scripts with extras + if "scripts" in config: + scripts = config["scripts"] + config_extras = config.get("extras", {}) + + for name, script in scripts.items(): + if not isinstance(script, dict): + continue + + extras = script.get("extras", []) + for extra in extras: + if extra not in config_extras: + result["errors"].append( + f'Script "{name}" requires extra "{extra}" which is not' + " defined." + ) + + # Checking types of all readme files (must match) + if "readme" in config and not isinstance(config["readme"], str): + readme_types = {readme_content_type(r) for r in config["readme"]} + if len(readme_types) > 1: + result["errors"].append( + "Declared README files must be of same type: found" + f" {', '.join(sorted(readme_types))}" + ) + + return result + + @classmethod + def locate(cls, cwd: Path | None = None) -> Path: + cwd = Path(cwd or Path.cwd()) + candidates = [cwd] + candidates.extend(cwd.parents) + + for path in candidates: + poetry_file = path / "pyproject.toml" + + if poetry_file.exists(): + return poetry_file + + else: + raise RuntimeError( + f"Poetry could not find a pyproject.toml file in {cwd} or its parents" + ) diff --git a/src/poetry/core/json/__init__.py b/src/poetry/core/json/__init__.py new file mode 100644 index 0000000..c46a8d2 --- /dev/null +++ b/src/poetry/core/json/__init__.py @@ -0,0 +1,40 @@ +from __future__ import annotations + +import json +import os + +from typing import Any + + +SCHEMA_DIR = os.path.join(os.path.dirname(__file__), "schemas") + + +class ValidationError(ValueError): + pass + + +def validate_object(obj: dict[str, Any], schema_name: str) -> list[str]: + schema_file = os.path.join(SCHEMA_DIR, f"{schema_name}.json") + + if not os.path.exists(schema_file): + raise ValueError(f"Schema {schema_name} does not exist.") + + with open(schema_file, encoding="utf-8") as f: + schema = json.loads(f.read()) + + from jsonschema import Draft7Validator + + validator = Draft7Validator(schema) + validation_errors = sorted(validator.iter_errors(obj), key=lambda e: e.path) # type: ignore[no-any-return] + + errors = [] + + for error in validation_errors: + message = error.message + if error.path: + path = ".".join(str(x) for x in error.absolute_path) + message = f"[{path}] {message}" + + errors.append(message) + + return errors diff --git a/src/poetry/core/json/schemas/poetry-schema.json b/src/poetry/core/json/schemas/poetry-schema.json new file mode 100644 index 0000000..8ff976f --- /dev/null +++ b/src/poetry/core/json/schemas/poetry-schema.json @@ -0,0 +1,655 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "name": "Package", + "type": "object", + "additionalProperties": true, + "required": [ + "name", + "version", + "description", + "authors" + ], + "properties": { + "name": { + "type": "string", + "description": "Package name." + }, + "version": { + "type": "string", + "description": "Package version." 
+ }, + "description": { + "type": "string", + "description": "Short package description.", + "pattern": "^[^\n]*$" + }, + "keywords": { + "type": "array", + "items": { + "type": "string", + "description": "A tag/keyword that this package relates to." + } + }, + "homepage": { + "type": "string", + "description": "Homepage URL for the project.", + "format": "uri" + }, + "repository": { + "type": "string", + "description": "Repository URL for the project.", + "format": "uri" + }, + "documentation": { + "type": "string", + "description": "Documentation URL for the project.", + "format": "uri" + }, + "license": { + "type": "string", + "description": "License name." + }, + "authors": { + "$ref": "#/definitions/authors" + }, + "maintainers": { + "$ref": "#/definitions/maintainers" + }, + "readme": { + "anyOf": [ + { + "type": "string", + "description": "The path to the README file." + }, + { + "type": "array", + "description": "A list of paths to the readme files.", + "items": { + "type": "string" + } + } + ] + }, + "classifiers": { + "type": "array", + "description": "A list of trove classifiers." + }, + "packages": { + "type": "array", + "description": "A list of packages to include in the final distribution.", + "items": { + "type": "object", + "description": "Information about where the package resides.", + "additionalProperties": false, + "required": [ + "include" + ], + "properties": { + "include": { + "$ref": "#/definitions/include-path" + }, + "from": { + "type": "string", + "description": "Where the source directory of the package resides." + }, + "format": { + "$ref": "#/definitions/package-formats" + } + } + } + }, + "include": { + "type": "array", + "description": "A list of files and folders to include.", + "items": { + "anyOf": [ + { + "$ref": "#/definitions/include-path" + }, + { + "type": "object", + "additionalProperties": false, + "required": [ + "path" + ], + "properties": { + "path": { + "$ref": "#/definitions/include-path" + }, + "format": { + "$ref": "#/definitions/package-formats" + } + } + } + ] + } + }, + "exclude": { + "type": "array", + "description": "A list of files and folders to exclude." + }, + "dependencies": { + "type": "object", + "description": "This is a hash of package name (keys) and version constraints (values) that are required to run this package.", + "required": [ + "python" + ], + "properties": { + "python": { + "type": "string", + "description": "The Python versions the package is compatible with." 
+ } + }, + "$ref": "#/definitions/dependencies", + "additionalProperties": false + }, + "dev-dependencies": { + "type": "object", + "description": "This is a hash of package name (keys) and version constraints (values) that this package requires for developing it (testing tools and such).", + "$ref": "#/definitions/dependencies", + "additionalProperties": false + }, + "extras": { + "type": "object", + "patternProperties": { + "^[a-zA-Z-_.0-9]+$": { + "type": "array", + "items": { + "type": "string" + } + } + } + }, + "group": { + "type": "object", + "description": "This represents groups of dependencies", + "patternProperties": { + "^[a-zA-Z-_.0-9]+$": { + "type": "object", + "description": "This represents a single dependency group", + "required": [ + "dependencies" + ], + "properties": { + "optional": { + "type": "boolean", + "description": "Whether the dependency group is optional or not" + }, + "dependencies": { + "type": "object", + "description": "The dependencies of this dependency group", + "$ref": "#/definitions/dependencies", + "additionalProperties": false + } + }, + "additionalProperties": false + } + } + }, + "build": { + "$ref": "#/definitions/build-section" + }, + "scripts": { + "type": "object", + "description": "A hash of scripts to be installed.", + "patternProperties": { + "^[a-zA-Z-_.0-9]+$": { + "oneOf": [ + { + "$ref": "#/definitions/script-legacy" + }, + { + "$ref": "#/definitions/script-table" + } + ] + } + } + }, + "plugins": { + "type": "object", + "description": "A hash of hashes representing plugins", + "patternProperties": { + "^[a-zA-Z-_.0-9]+$": { + "type": "object", + "patternProperties": { + "^[a-zA-Z-_.0-9]+$": { + "type": "string" + } + } + } + } + }, + "urls": { + "type": "object", + "patternProperties": { + "^.+$": { + "type": "string", + "description": "The full url of the custom url." + } + } + } + }, + "definitions": { + "authors": { + "type": "array", + "description": "List of authors that contributed to the package. This is typically the main maintainers, not the full list.", + "items": { + "type": "string" + } + }, + "maintainers": { + "type": "array", + "description": "List of maintainers, other than the original author(s), that upkeep the package.", + "items": { + "type": "string" + } + }, + "include-path": { + "type": "string", + "description": "Path to file or directory to include." + }, + "package-format": { + "type": "string", + "enum": [ + "sdist", + "wheel" + ], + "description": "A Python packaging format." + }, + "package-formats": { + "oneOf": [ + { + "$ref": "#/definitions/package-format" + }, + { + "type": "array", + "items": { + "$ref": "#/definitions/package-format" + } + } + ], + "description": "The format(s) for which the package must be included." + }, + "dependencies": { + "type": "object", + "patternProperties": { + "^[a-zA-Z-_.0-9]+$": { + "oneOf": [ + { + "$ref": "#/definitions/dependency" + }, + { + "$ref": "#/definitions/long-dependency" + }, + { + "$ref": "#/definitions/git-dependency" + }, + { + "$ref": "#/definitions/file-dependency" + }, + { + "$ref": "#/definitions/path-dependency" + }, + { + "$ref": "#/definitions/url-dependency" + }, + { + "$ref": "#/definitions/multiple-constraints-dependency" + } + ] + } + } + }, + "dependency": { + "type": "string", + "description": "The constraint of the dependency." 
+ }, + "long-dependency": { + "type": "object", + "required": [ + "version" + ], + "additionalProperties": false, + "properties": { + "version": { + "type": "string", + "description": "The constraint of the dependency." + }, + "python": { + "type": "string", + "description": "The python versions for which the dependency should be installed." + }, + "platform": { + "type": "string", + "description": "The platform(s) for which the dependency should be installed." + }, + "markers": { + "type": "string", + "description": "The PEP 508 compliant environment markers for which the dependency should be installed." + }, + "allow-prereleases": { + "type": "boolean", + "description": "Whether the dependency allows prereleases or not." + }, + "allows-prereleases": { + "type": "boolean", + "description": "Whether the dependency allows prereleases or not." + }, + "optional": { + "type": "boolean", + "description": "Whether the dependency is optional or not." + }, + "extras": { + "type": "array", + "description": "The required extras for this dependency.", + "items": { + "type": "string" + } + }, + "source": { + "type": "string", + "description": "The exclusive source used to search for this dependency." + } + } + }, + "git-dependency": { + "type": "object", + "required": [ + "git" + ], + "additionalProperties": false, + "properties": { + "git": { + "type": "string", + "description": "The url of the git repository.", + "format": "uri" + }, + "branch": { + "type": "string", + "description": "The branch to checkout." + }, + "tag": { + "type": "string", + "description": "The tag to checkout." + }, + "rev": { + "type": "string", + "description": "The revision to checkout." + }, + "subdirectory": { + "type": "string", + "description": "The relative path to the directory where the package is located." + }, + "python": { + "type": "string", + "description": "The python versions for which the dependency should be installed." + }, + "platform": { + "type": "string", + "description": "The platform(s) for which the dependency should be installed." + }, + "markers": { + "type": "string", + "description": "The PEP 508 compliant environment markers for which the dependency should be installed." + }, + "allow-prereleases": { + "type": "boolean", + "description": "Whether the dependency allows prereleases or not." + }, + "allows-prereleases": { + "type": "boolean", + "description": "Whether the dependency allows prereleases or not." + }, + "optional": { + "type": "boolean", + "description": "Whether the dependency is optional or not." + }, + "extras": { + "type": "array", + "description": "The required extras for this dependency.", + "items": { + "type": "string" + } + }, + "develop": { + "type": "boolean", + "description": "Whether to install the dependency in development mode." + } + } + }, + "file-dependency": { + "type": "object", + "required": [ + "file" + ], + "additionalProperties": false, + "properties": { + "file": { + "type": "string", + "description": "The path to the file." + }, + "python": { + "type": "string", + "description": "The python versions for which the dependency should be installed." + }, + "platform": { + "type": "string", + "description": "The platform(s) for which the dependency should be installed." + }, + "markers": { + "type": "string", + "description": "The PEP 508 compliant environment markers for which the dependency should be installed." + }, + "optional": { + "type": "boolean", + "description": "Whether the dependency is optional or not." 
+ }, + "extras": { + "type": "array", + "description": "The required extras for this dependency.", + "items": { + "type": "string" + } + } + } + }, + "path-dependency": { + "type": "object", + "required": [ + "path" + ], + "additionalProperties": false, + "properties": { + "path": { + "type": "string", + "description": "The path to the dependency." + }, + "python": { + "type": "string", + "description": "The python versions for which the dependency should be installed." + }, + "platform": { + "type": "string", + "description": "The platform(s) for which the dependency should be installed." + }, + "markers": { + "type": "string", + "description": "The PEP 508 compliant environment markers for which the dependency should be installed." + }, + "optional": { + "type": "boolean", + "description": "Whether the dependency is optional or not." + }, + "extras": { + "type": "array", + "description": "The required extras for this dependency.", + "items": { + "type": "string" + } + }, + "develop": { + "type": "boolean", + "description": "Whether to install the dependency in development mode." + } + } + }, + "url-dependency": { + "type": "object", + "required": [ + "url" + ], + "additionalProperties": false, + "properties": { + "url": { + "type": "string", + "description": "The url to the file." + }, + "subdirectory": { + "type": "string", + "description": "The relative path to the directory where the package is located." + }, + "python": { + "type": "string", + "description": "The python versions for which the dependency should be installed." + }, + "platform": { + "type": "string", + "description": "The platform(s) for which the dependency should be installed." + }, + "markers": { + "type": "string", + "description": "The PEP 508 compliant environment markers for which the dependency should be installed." + }, + "optional": { + "type": "boolean", + "description": "Whether the dependency is optional or not." + }, + "extras": { + "type": "array", + "description": "The required extras for this dependency.", + "items": { + "type": "string" + } + } + } + }, + "multiple-constraints-dependency": { + "type": "array", + "minItems": 1, + "items": { + "oneOf": [ + { + "$ref": "#/definitions/dependency" + }, + { + "$ref": "#/definitions/long-dependency" + }, + { + "$ref": "#/definitions/git-dependency" + }, + { + "$ref": "#/definitions/file-dependency" + }, + { + "$ref": "#/definitions/path-dependency" + }, + { + "$ref": "#/definitions/url-dependency" + } + ] + } + }, + "script-table": { + "type": "object", + "oneOf": [ + { + "$ref": "#/definitions/extra-script-legacy" + }, + { + "$ref": "#/definitions/extra-scripts" + } + ] + }, + "script-legacy": { + "type": "string", + "description": "A simple script pointing to a callable object." + }, + "extra-scripts": { + "type": "object", + "description": "Either a console entry point or a script file that'll be included in the distribution package.", + "additionalProperties": false, + "properties": { + "reference": { + "type": "string", + "description": "If type is file this is the relative path of the script file, if console it is the module name." + }, + "type": { + "description": "Value can be either file or console.", + "type": "string", + "enum": [ + "file", + "console" + ] + }, + "extras": { + "type": "array", + "description": "The required extras for this script. 
Only applicable if type is console.", + "items": { + "type": "string" + } + } + }, + "required": [ + "reference", + "type" + ] + }, + "extra-script-legacy": { + "type": "object", + "description": "A script that should be installed only if extras are activated.", + "additionalProperties": false, + "properties": { + "callable": { + "$ref": "#/definitions/script-legacy", + "description": "The entry point of the script. Deprecated in favour of reference." + }, + "extras": { + "type": "array", + "description": "The required extras for this script.", + "items": { + "type": "string" + } + } + } + }, + "build-script": { + "type": "string", + "description": "The python script file used to build extensions." + }, + "build-config": { + "type": "object", + "description": "Build specific configurations.", + "additionalProperties": false, + "properties": { + "generate-setup-file": { + "type": "boolean", + "description": "Generate and include a setup.py file in sdist.", + "default": true + }, + "script": { + "$ref": "#/definitions/build-script" + } + } + }, + "build-section": { + "oneOf": [ + { + "$ref": "#/definitions/build-script" + }, + { + "$ref": "#/definitions/build-config" + } + ] + } + } +} diff --git a/src/poetry/core/masonry/__init__.py b/src/poetry/core/masonry/__init__.py new file mode 100644 index 0000000..943204a --- /dev/null +++ b/src/poetry/core/masonry/__init__.py @@ -0,0 +1,8 @@ +""" +This module handles the packaging and publishing +of python projects. + +A lot of the code used here has been taken from +`flit `__ and adapted +to work with the poetry codebase, so kudos to them for showing the way. +""" diff --git a/src/poetry/core/masonry/api.py b/src/poetry/core/masonry/api.py new file mode 100644 index 0000000..4cd7aef --- /dev/null +++ b/src/poetry/core/masonry/api.py @@ -0,0 +1,83 @@ +""" +PEP-517 compliant buildsystem API +""" +from __future__ import annotations + +import logging + +from pathlib import Path +from typing import Any + +from poetry.core.factory import Factory +from poetry.core.masonry.builders.sdist import SdistBuilder +from poetry.core.masonry.builders.wheel import WheelBuilder + + +log = logging.getLogger(__name__) + + +def get_requires_for_build_wheel( + config_settings: dict[str, Any] | None = None, +) -> list[str]: + """ + Returns an additional list of requirements for building, as PEP508 strings, + above and beyond those specified in the pyproject.toml file. + + This implementation is optional. At the moment it only returns an empty list, which would be the same as if + not define. So this is just for completeness for future implementation. + """ + + return [] + + +# For now, we require all dependencies to build either a wheel or an sdist. 
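An illustrative aside, not part of the upstream sources being imported: the PEP 517 hooks that `masonry/api.py` exposes are normally driven by a build frontend such as pip or build, but they can also be exercised by hand from a project root. A minimal sketch, assuming a hypothetical project directory that declares poetry-core as its build backend:

```python
# Illustrative sketch only; "/path/to/project" is a hypothetical project
# that uses poetry-core as its PEP 517 build backend.
import os

from poetry.core.masonry import api

os.chdir("/path/to/project")  # the hooks resolve pyproject.toml from the CWD
print(api.get_requires_for_build_wheel())  # [] - no extra build requirements
wheel_name = api.build_wheel("dist")       # e.g. "mypkg-1.0.0-py3-none-any.whl"
sdist_name = api.build_sdist("dist")       # e.g. "mypkg-1.0.0.tar.gz"
```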
+get_requires_for_build_sdist = get_requires_for_build_wheel + + +def prepare_metadata_for_build_wheel( + metadata_directory: str, config_settings: dict[str, Any] | None = None +) -> str: + poetry = Factory().create_poetry(Path(".").resolve(), with_groups=False) + builder = WheelBuilder(poetry) + metadata_path = Path(metadata_directory) + dist_info = builder.prepare_metadata(metadata_path) + return dist_info.name + + +def build_wheel( + wheel_directory: str, + config_settings: dict[str, Any] | None = None, + metadata_directory: str | None = None, +) -> str: + """Builds a wheel, places it in wheel_directory""" + poetry = Factory().create_poetry(Path(".").resolve(), with_groups=False) + metadata_path = None if metadata_directory is None else Path(metadata_directory) + + return WheelBuilder.make_in( + poetry, Path(wheel_directory), metadata_directory=metadata_path + ) + + +def build_sdist( + sdist_directory: str, config_settings: dict[str, Any] | None = None +) -> str: + """Builds an sdist, places it in sdist_directory""" + poetry = Factory().create_poetry(Path(".").resolve(), with_groups=False) + + path = SdistBuilder(poetry).build(Path(sdist_directory)) + + return path.name + + +def build_editable( + wheel_directory: str, + config_settings: dict[str, Any] | None = None, + metadata_directory: str | None = None, +) -> str: + poetry = Factory().create_poetry(Path(".").resolve(), with_groups=False) + + return WheelBuilder.make_in(poetry, Path(wheel_directory), editable=True) + + +get_requires_for_build_editable = get_requires_for_build_wheel +prepare_metadata_for_build_editable = prepare_metadata_for_build_wheel diff --git a/src/poetry/core/masonry/builder.py b/src/poetry/core/masonry/builder.py new file mode 100644 index 0000000..3042885 --- /dev/null +++ b/src/poetry/core/masonry/builder.py @@ -0,0 +1,33 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from pathlib import Path + + from poetry.core.poetry import Poetry + + +class Builder: + def __init__(self, poetry: Poetry) -> None: + from poetry.core.masonry.builders.sdist import SdistBuilder + from poetry.core.masonry.builders.wheel import WheelBuilder + + self._poetry = poetry + + self._formats = { + "sdist": SdistBuilder, + "wheel": WheelBuilder, + } + + def build(self, fmt: str, executable: str | Path | None = None) -> None: + if fmt in self._formats: + builders = [self._formats[fmt]] + elif fmt == "all": + builders = list(self._formats.values()) + else: + raise ValueError(f"Invalid format: {fmt}") + + for builder in builders: + builder(self._poetry, executable=executable).build() diff --git a/src/poetry/core/masonry/builders/__init__.py b/src/poetry/core/masonry/builders/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/masonry/builders/builder.py b/src/poetry/core/masonry/builders/builder.py new file mode 100644 index 0000000..9f56929 --- /dev/null +++ b/src/poetry/core/masonry/builders/builder.py @@ -0,0 +1,398 @@ +from __future__ import annotations + +import logging +import re +import sys +import warnings + +from collections import defaultdict +from pathlib import Path +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from poetry.core.poetry import Poetry + + +AUTHOR_REGEX = re.compile(r"(?u)^(?P[- .,\w\d'’\"()]+) <(?P.+?)>$") + +METADATA_BASE = """\ +Metadata-Version: 2.1 +Name: {name} +Version: {version} +Summary: {summary} +""" + +logger = logging.getLogger(__name__) + + +class Builder: + format: str | None = None + + def __init__( + 
self, + poetry: Poetry, + ignore_packages_formats: bool = False, + executable: Path | None = None, + ) -> None: + from poetry.core.masonry.metadata import Metadata + from poetry.core.masonry.utils.module import Module + + self._poetry = poetry + self._package = poetry.package + self._path: Path = poetry.file.parent + self._excluded_files: set[str] | None = None + self._executable = Path(executable or sys.executable) + + packages = [] + for p in self._package.packages: + formats = p.get("format") or None + + # Default to including the package in both sdist & wheel + # if the `format` key is not provided in the inline include table. + if formats is None: + formats = ["sdist", "wheel"] + + if not isinstance(formats, list): + formats = [formats] + + if ( + formats + and self.format + and self.format not in formats + and not ignore_packages_formats + ): + continue + + packages.append(p) + + includes = [] + for include in self._package.include: + formats = include.get("format", []) + + if ( + formats + and self.format + and self.format not in formats + and not ignore_packages_formats + ): + continue + + includes.append(include) + + self._module = Module( + self._package.name, + self._path.as_posix(), + packages=packages, + includes=includes, + ) + + self._meta = Metadata.from_package(self._package) + + @property + def executable(self) -> Path: + return self._executable + + @property + def default_target_dir(self) -> Path: + return self._path / "dist" + + def build(self, target_dir: Path | None) -> Path: + raise NotImplementedError() + + def find_excluded_files(self, fmt: str | None = None) -> set[str]: + if self._excluded_files is None: + from poetry.core.vcs import get_vcs + + # Checking VCS + vcs = get_vcs(self._path) + if not vcs: + vcs_ignored_files = set() + else: + vcs_ignored_files = set(vcs.get_ignored_files()) + + explicitly_excluded = set() + for excluded_glob in self._package.exclude: + for excluded in self._path.glob(str(excluded_glob)): + explicitly_excluded.add( + Path(excluded).relative_to(self._path).as_posix() + ) + + explicitly_included = set() + for inc in self._package.include: + if fmt and inc["format"] and fmt not in inc["format"]: + continue + + included_glob = inc["path"] + for included in self._path.glob(str(included_glob)): + explicitly_included.add( + Path(included).relative_to(self._path).as_posix() + ) + + ignored = (vcs_ignored_files | explicitly_excluded) - explicitly_included + for ignored_file in ignored: + logger.debug(f"Ignoring: {ignored_file}") + + self._excluded_files = ignored + + return self._excluded_files + + def is_excluded(self, filepath: str | Path) -> bool: + exclude_path = Path(filepath) + + while True: + if exclude_path.as_posix() in self.find_excluded_files(fmt=self.format): + return True + + if len(exclude_path.parts) > 1: + exclude_path = exclude_path.parent + else: + break + + return False + + def find_files_to_add(self, exclude_build: bool = True) -> set[BuildIncludeFile]: + """ + Finds all files to add to the tarball + """ + from poetry.core.masonry.utils.package_include import PackageInclude + + to_add = set() + + for include in self._module.includes: + include.refresh() + formats = include.formats or ["sdist"] + + for file in include.elements: + if "__pycache__" in str(file): + continue + + if ( + isinstance(include, PackageInclude) + and include.source + and self.format == "wheel" + ): + source_root = include.base + else: + source_root = self._path + + if file.is_dir(): + if self.format in formats: + for current_file in file.glob("**/*"): 
+ include_file = BuildIncludeFile( + path=current_file, + project_root=self._path, + source_root=source_root, + ) + + if not current_file.is_dir() and not self.is_excluded( + include_file.relative_to_source_root() + ): + to_add.add(include_file) + continue + + include_file = BuildIncludeFile( + path=file, project_root=self._path, source_root=source_root + ) + + if self.is_excluded( + include_file.relative_to_project_root() + ) and isinstance(include, PackageInclude): + continue + + if file.suffix == ".pyc": + continue + + logger.debug(f"Adding: {str(file)}") + to_add.add(include_file) + + # add build script if it is specified and explicitly required + if self._package.build_script and not exclude_build: + to_add.add( + BuildIncludeFile( + path=self._package.build_script, + project_root=self._path, + source_root=self._path, + ) + ) + + return to_add + + def get_metadata_content(self) -> str: + content = METADATA_BASE.format( + name=self._meta.name, + version=self._meta.version, + summary=str(self._meta.summary), + ) + + # Optional fields + if self._meta.home_page: + content += f"Home-page: {self._meta.home_page}\n" + + if self._meta.license: + content += f"License: {self._meta.license}\n" + + if self._meta.keywords: + content += f"Keywords: {self._meta.keywords}\n" + + if self._meta.author: + content += f"Author: {str(self._meta.author)}\n" + + if self._meta.author_email: + content += f"Author-email: {str(self._meta.author_email)}\n" + + if self._meta.maintainer: + content += f"Maintainer: {str(self._meta.maintainer)}\n" + + if self._meta.maintainer_email: + content += f"Maintainer-email: {str(self._meta.maintainer_email)}\n" + + if self._meta.requires_python: + content += f"Requires-Python: {self._meta.requires_python}\n" + + for classifier in self._meta.classifiers: + content += f"Classifier: {classifier}\n" + + for extra in sorted(self._meta.provides_extra): + content += f"Provides-Extra: {extra}\n" + + for dep in sorted(self._meta.requires_dist): + content += f"Requires-Dist: {dep}\n" + + for url in sorted(self._meta.project_urls, key=lambda u: u[0]): + content += f"Project-URL: {str(url)}\n" + + if self._meta.description_content_type: + content += ( + f"Description-Content-Type: {self._meta.description_content_type}\n" + ) + + if self._meta.description is not None: + content += "\n" + str(self._meta.description) + "\n" + + return content + + def convert_entry_points(self) -> dict[str, list[str]]: + result = defaultdict(list) + + # Scripts -> Entry points + for name, specification in self._poetry.local_config.get("scripts", {}).items(): + if isinstance(specification, str): + # TODO: deprecate this in favour or reference + specification = {"reference": specification, "type": "console"} + + if "callable" in specification: + warnings.warn( + f"Use of callable in script specification ({name}) is deprecated." 
+ " Use reference instead.", + DeprecationWarning, + ) + specification = { + "reference": specification["callable"], + "type": "console", + } + + if specification.get("type") != "console": + continue + + extras = specification.get("extras", []) + extras = f"[{', '.join(extras)}]" if extras else "" + reference = specification.get("reference") + + if reference: + result["console_scripts"].append(f"{name} = {reference}{extras}") + + # Plugins -> entry points + plugins = self._poetry.local_config.get("plugins", {}) + for groupname, group in plugins.items(): + for name, specification in sorted(group.items()): + result[groupname].append(f"{name} = {specification}") + + for groupname in result: + result[groupname] = sorted(result[groupname]) + + return dict(result) + + def convert_script_files(self) -> list[Path]: + script_files: list[Path] = [] + + for name, specification in self._poetry.local_config.get("scripts", {}).items(): + if isinstance(specification, dict) and specification.get("type") == "file": + source = specification["reference"] + + if Path(source).is_absolute(): + raise RuntimeError( + f"{source} in {name} is an absolute path. Expected relative" + " path." + ) + + abs_path = Path.joinpath(self._path, source) + + if not abs_path.exists(): + raise RuntimeError( + f"{abs_path} in script specification ({name}) is not found." + ) + + if not abs_path.is_file(): + raise RuntimeError( + f"{abs_path} in script specification ({name}) is not a file." + ) + + script_files.append(abs_path) + + return script_files + + @classmethod + def convert_author(cls, author: str) -> dict[str, str]: + m = AUTHOR_REGEX.match(author) + if m is None: + raise RuntimeError(f"{author} does not match regex") + + name = m.group("name") + email = m.group("email") + + return {"name": name, "email": email} + + +class BuildIncludeFile: + def __init__( + self, + path: Path | str, + project_root: Path | str, + source_root: Path | str | None = None, + ) -> None: + """ + :param project_root: the full path of the project's root + :param path: a full path to the file to be included + :param source_root: the root path to resolve to + """ + self.path = Path(path) + self.project_root = Path(project_root).resolve() + self.source_root = None if not source_root else Path(source_root).resolve() + if not self.path.is_absolute() and self.source_root: + self.path = self.source_root / self.path + else: + self.path = self.path + + self.path = self.path.resolve() + + def __eq__(self, other: object) -> bool: + if not isinstance(other, BuildIncludeFile): + return False + + return self.path == other.path + + def __hash__(self) -> int: + return hash(self.path) + + def __repr__(self) -> str: + return str(self.path) + + def relative_to_project_root(self) -> Path: + return self.path.relative_to(self.project_root) + + def relative_to_source_root(self) -> Path: + if self.source_root is not None: + return self.path.relative_to(self.source_root) + + return self.path diff --git a/src/poetry/core/masonry/builders/sdist.py b/src/poetry/core/masonry/builders/sdist.py new file mode 100644 index 0000000..a791088 --- /dev/null +++ b/src/poetry/core/masonry/builders/sdist.py @@ -0,0 +1,423 @@ +from __future__ import annotations + +import logging +import os +import re +import tarfile + +from collections import defaultdict +from contextlib import contextmanager +from copy import copy +from gzip import GzipFile +from io import BytesIO +from pathlib import Path +from posixpath import join as pjoin +from pprint import pformat +from typing import TYPE_CHECKING 
+from typing import Iterator + +from poetry.core.masonry.builders.builder import Builder +from poetry.core.masonry.builders.builder import BuildIncludeFile +from poetry.core.masonry.utils.helpers import distribution_name + + +if TYPE_CHECKING: + from tarfile import TarInfo + + from poetry.core.masonry.utils.package_include import PackageInclude + from poetry.core.packages.dependency import Dependency + from poetry.core.packages.project_package import ProjectPackage + +SETUP = """\ +# -*- coding: utf-8 -*- +from setuptools import setup + +{before} +setup_kwargs = {{ + 'name': {name!r}, + 'version': {version!r}, + 'description': {description!r}, + 'long_description': {long_description!r}, + 'author': {author!r}, + 'author_email': {author_email!r}, + 'maintainer': {maintainer!r}, + 'maintainer_email': {maintainer_email!r}, + 'url': {url!r}, + {extra} +}} +{after} + +setup(**setup_kwargs) +""" + +logger = logging.getLogger(__name__) + + +class SdistBuilder(Builder): + format = "sdist" + + def build( + self, + target_dir: Path | None = None, + ) -> Path: + logger.info("Building sdist") + target_dir = target_dir or self.default_target_dir + + if not target_dir.exists(): + target_dir.mkdir(parents=True) + + name = distribution_name(self._package.name) + target = target_dir / f"{name}-{self._meta.version}.tar.gz" + gz = GzipFile(target.as_posix(), mode="wb", mtime=0) + tar = tarfile.TarFile( + target.as_posix(), mode="w", fileobj=gz, format=tarfile.PAX_FORMAT + ) + + try: + tar_dir = f"{name}-{self._meta.version}" + + files_to_add = self.find_files_to_add(exclude_build=False) + + for file in sorted(files_to_add, key=lambda x: x.relative_to_source_root()): + tar_info = tar.gettarinfo( + str(file.path), + arcname=pjoin(tar_dir, str(file.relative_to_source_root())), + ) + tar_info = self.clean_tarinfo(tar_info) + + if tar_info.isreg(): + with file.path.open("rb") as f: + tar.addfile(tar_info, f) + else: + tar.addfile(tar_info) # Symlinks & ? 
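An illustrative aside, not part of the upstream sources being imported: besides the PEP 517 entry points, `SdistBuilder` can be driven programmatically against a loaded project. A minimal sketch, assuming the same hypothetical project path as above:

```python
# Illustrative sketch only; the project path is hypothetical.
from pathlib import Path

from poetry.core.factory import Factory
from poetry.core.masonry.builders.sdist import SdistBuilder

poetry = Factory().create_poetry(Path("/path/to/project"))
artifact = SdistBuilder(poetry).build(Path("/path/to/project/dist"))
print(artifact.name)  # e.g. "mypkg-1.0.0.tar.gz"
```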
+ + if self._poetry.package.build_should_generate_setup(): + setup = self.build_setup() + tar_info = tarfile.TarInfo(pjoin(tar_dir, "setup.py")) + tar_info.size = len(setup) + tar_info.mtime = 0 + tar_info = self.clean_tarinfo(tar_info) + tar.addfile(tar_info, BytesIO(setup)) + + pkg_info = self.build_pkg_info() + + tar_info = tarfile.TarInfo(pjoin(tar_dir, "PKG-INFO")) + tar_info.size = len(pkg_info) + tar_info.mtime = 0 + tar_info = self.clean_tarinfo(tar_info) + tar.addfile(tar_info, BytesIO(pkg_info)) + finally: + tar.close() + gz.close() + + logger.info(f"Built {target.name}") + return target + + def build_setup(self) -> bytes: + from poetry.core.masonry.utils.package_include import PackageInclude + + before, extra, after = [], [], [] + package_dir: dict[str, str] = {} + + # If we have a build script, use it + if self._package.build_script: + import_name = ".".join( + Path(self._package.build_script).with_suffix("").parts + ) + after += [f"from {import_name} import *", "build(setup_kwargs)"] + + modules = [] + packages = [] + package_data = {} + for include in self._module.includes: + if include.formats and "sdist" not in include.formats: + continue + + if isinstance(include, PackageInclude): + if include.is_package(): + pkg_dir, _packages, _package_data = self.find_packages(include) + + if pkg_dir is not None: + pkg_root = os.path.relpath(pkg_dir, str(self._path)) + if "" in package_dir: + package_dir.update( + (p, os.path.join(pkg_root, p.replace(".", "/"))) + for p in _packages + ) + else: + package_dir[""] = pkg_root + + packages += [p for p in _packages if p not in packages] + package_data.update(_package_data) + else: + module = include.elements[0].relative_to(include.base).stem + + if include.source is not None: + package_dir[""] = str(include.base.relative_to(self._path)) + + if module not in modules: + modules.append(module) + else: + pass + + if package_dir: + before.append(f"package_dir = \\\n{pformat(package_dir)}\n") + extra.append("'package_dir': package_dir,") + + if packages: + before.append(f"packages = \\\n{pformat(sorted(packages))}\n") + extra.append("'packages': packages,") + + if package_data: + before.append(f"package_data = \\\n{pformat(package_data)}\n") + extra.append("'package_data': package_data,") + + if modules: + before.append(f"modules = \\\n{pformat(modules)}") + extra.append("'py_modules': modules,") + + dependencies, extras = self.convert_dependencies( + self._package, self._package.requires + ) + if dependencies: + before.append(f"install_requires = \\\n{pformat(sorted(dependencies))}\n") + extra.append("'install_requires': install_requires,") + + if extras: + before.append(f"extras_require = \\\n{pformat(extras)}\n") + extra.append("'extras_require': extras_require,") + + entry_points = self.convert_entry_points() + if entry_points: + before.append(f"entry_points = \\\n{pformat(entry_points)}\n") + extra.append("'entry_points': entry_points,") + + script_files = self.convert_script_files() + if script_files: + rel_paths = [str(p.relative_to(self._path)) for p in script_files] + before.append(f"scripts = \\\n{pformat(rel_paths)}\n") + extra.append("'scripts': scripts,") + + if self._package.python_versions != "*": + python_requires = self._meta.requires_python + + extra.append(f"'python_requires': {python_requires!r},") + + return SETUP.format( + before="\n".join(before), + name=str(self._meta.name), + version=self._meta.version, + description=str(self._meta.summary), + long_description=str(self._meta.description), + author=str(self._meta.author), + 
author_email=str(self._meta.author_email), + maintainer=str(self._meta.maintainer), + maintainer_email=str(self._meta.maintainer_email), + url=str(self._meta.home_page), + extra="\n ".join(extra), + after="\n".join(after), + ).encode() + + @contextmanager + def setup_py(self) -> Iterator[Path]: + setup = self._path / "setup.py" + has_setup = setup.exists() + + if has_setup: + logger.warning("A setup.py file already exists. Using it.") + else: + with setup.open("w", encoding="utf-8") as f: + f.write(self.build_setup().decode()) + + yield setup + + if not has_setup: + setup.unlink() + + def build_pkg_info(self) -> bytes: + return self.get_metadata_content().encode() + + def find_packages( + self, include: PackageInclude + ) -> tuple[str | None, list[str], dict[str, list[str]]]: + """ + Discover subpackages and data. + + It also retrieves necessary files. + """ + pkgdir = None + if include.source is not None: + pkgdir = str(include.base) + + base = str(include.elements[0].parent) + + pkg_name = include.package + pkg_data: dict[str, list[str]] = defaultdict(list) + # Undocumented setup() feature: + # the empty string matches all package names + pkg_data[""].append("*") + packages = [pkg_name] + subpkg_paths = set() + + def find_nearest_pkg(rel_path: str) -> tuple[str, str]: + parts = rel_path.split(os.sep) + for i in reversed(range(1, len(parts))): + ancestor = "/".join(parts[:i]) + if ancestor in subpkg_paths: + pkg = ".".join([pkg_name] + parts[:i]) + return pkg, "/".join(parts[i:]) + + # Relative to the top-level package + return pkg_name, Path(rel_path).as_posix() + + for path, _dirnames, filenames in os.walk(str(base), topdown=True): + if os.path.basename(path) == "__pycache__": + continue + + from_top_level = os.path.relpath(path, base) + if from_top_level == ".": + continue + + is_subpkg = any( + [filename.endswith(".py") for filename in filenames] + ) and not all( + [ + self.is_excluded(Path(path, filename).relative_to(self._path)) + for filename in filenames + if filename.endswith(".py") + ] + ) + if is_subpkg: + subpkg_paths.add(from_top_level) + parts = from_top_level.split(os.sep) + packages.append(".".join([pkg_name] + parts)) + else: + pkg, from_nearest_pkg = find_nearest_pkg(from_top_level) + + data_elements = [ + f.relative_to(self._path) + for f in Path(path).glob("*") + if not f.is_dir() + ] + + data = [e for e in data_elements if not self.is_excluded(e)] + if not data: + continue + + if len(data) == len(data_elements): + pkg_data[pkg].append(pjoin(from_nearest_pkg, "*")) + else: + for d in data: + if d.is_dir(): + continue + + pkg_data[pkg] += [pjoin(from_nearest_pkg, d.name) for d in data] + + # Sort values in pkg_data + pkg_data = {k: sorted(v) for (k, v) in pkg_data.items() if v} + + return pkgdir, sorted(packages), pkg_data + + def find_files_to_add(self, exclude_build: bool = False) -> set[BuildIncludeFile]: + to_add = super().find_files_to_add(exclude_build) + + # add any additional files, starting with all LICENSE files + additional_files = set(self._path.glob("LICENSE*")) + + # add script files + additional_files.update(self.convert_script_files()) + + # Include project files + additional_files.add(Path("pyproject.toml")) + + # add readme if it is specified + if "readme" in self._poetry.local_config: + additional_files.add(self._poetry.local_config["readme"]) + + for additional_file in additional_files: + file = BuildIncludeFile( + path=additional_file, project_root=self._path, source_root=self._path + ) + if file.path.exists(): + logger.debug(f"Adding: 
{file.relative_to_source_root()}") + to_add.add(file) + + return to_add + + @classmethod + def convert_dependencies( + cls, package: ProjectPackage, dependencies: list[Dependency] + ) -> tuple[list[str], dict[str, list[str]]]: + main = [] + extras = defaultdict(list) + req_regex = re.compile(r"^(.+) \((.+)\)$") + + for dependency in dependencies: + if dependency.is_optional(): + for extra_name, reqs in package.extras.items(): + for req in reqs: + if req.name == dependency.name: + requirement = dependency.to_pep_508(with_extras=False) + if ";" in requirement: + requirement, conditions = requirement.split(";") + + requirement = requirement.strip() + if req_regex.match(requirement): + requirement = req_regex.sub( + "\\1\\2", requirement.strip() + ) + + extras[extra_name + ":" + conditions.strip()].append( + requirement + ) + + continue + + requirement = requirement.strip() + if req_regex.match(requirement): + requirement = req_regex.sub( + "\\1\\2", requirement.strip() + ) + extras[extra_name].append(requirement) + continue + + requirement = dependency.to_pep_508() + if ";" in requirement: + requirement, conditions = requirement.split(";") + + requirement = requirement.strip() + if req_regex.match(requirement): + requirement = req_regex.sub("\\1\\2", requirement.strip()) + + extras[":" + conditions.strip()].append(requirement) + + continue + + requirement = requirement.strip() + if req_regex.match(requirement): + requirement = req_regex.sub("\\1\\2", requirement.strip()) + + main.append(requirement) + + return main, dict(extras) + + @classmethod + def clean_tarinfo(cls, tar_info: TarInfo) -> TarInfo: + """ + Clean metadata from a TarInfo object to make it more reproducible. + + - Set uid & gid to 0 + - Set uname and gname to "" + - Normalise permissions to 644 or 755 + - Set mtime if not None + """ + from poetry.core.masonry.utils.helpers import normalize_file_permissions + + ti = copy(tar_info) + ti.uid = 0 + ti.gid = 0 + ti.uname = "" + ti.gname = "" + ti.mode = normalize_file_permissions(ti.mode) + + return ti diff --git a/src/poetry/core/masonry/builders/wheel.py b/src/poetry/core/masonry/builders/wheel.py new file mode 100644 index 0000000..ba0f4ed --- /dev/null +++ b/src/poetry/core/masonry/builders/wheel.py @@ -0,0 +1,428 @@ +from __future__ import annotations + +import contextlib +import csv +import hashlib +import logging +import os +import shutil +import stat +import subprocess +import tempfile +import zipfile + +from base64 import urlsafe_b64encode +from io import StringIO +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Iterator +from typing import TextIO + +from packaging.tags import sys_tags + +from poetry.core import __version__ +from poetry.core.constraints.version import parse_constraint +from poetry.core.masonry.builders.builder import Builder +from poetry.core.masonry.builders.sdist import SdistBuilder +from poetry.core.masonry.utils.helpers import distribution_name +from poetry.core.masonry.utils.helpers import normalize_file_permissions +from poetry.core.masonry.utils.package_include import PackageInclude +from poetry.core.utils.helpers import temporary_directory + + +if TYPE_CHECKING: + from packaging.utils import NormalizedName + + from poetry.core.poetry import Poetry + +wheel_file_template = """\ +Wheel-Version: 1.0 +Generator: poetry-core {version} +Root-Is-Purelib: {pure_lib} +Tag: {tag} +""" + +logger = logging.getLogger(__name__) + + +class WheelBuilder(Builder): + format = "wheel" + + def __init__( + self, + poetry: Poetry, + 
original: Path | None = None, + executable: Path | None = None, + editable: bool = False, + metadata_directory: Path | None = None, + ) -> None: + super().__init__(poetry, executable=executable) + + self._records: list[tuple[str, str, int]] = [] + self._original_path = self._path + if original: + self._original_path = original.parent + self._editable = editable + self._metadata_directory = metadata_directory + + @classmethod + def make_in( + cls, + poetry: Poetry, + directory: Path | None = None, + original: Path | None = None, + executable: Path | None = None, + editable: bool = False, + metadata_directory: Path | None = None, + ) -> str: + wb = WheelBuilder( + poetry, + original=original, + executable=executable, + editable=editable, + metadata_directory=metadata_directory, + ) + wb.build(target_dir=directory) + + return wb.wheel_filename + + @classmethod + def make(cls, poetry: Poetry, executable: Path | None = None) -> None: + """Build a wheel in the dist/ directory, and optionally upload it.""" + cls.make_in(poetry, executable=executable) + + def build( + self, + target_dir: Path | None = None, + ) -> Path: + logger.info("Building wheel") + + target_dir = target_dir or self.default_target_dir + if not target_dir.exists(): + target_dir.mkdir() + + (fd, temp_path) = tempfile.mkstemp(suffix=".whl") + + st_mode = os.stat(temp_path).st_mode + new_mode = normalize_file_permissions(st_mode) + os.chmod(temp_path, new_mode) + + with os.fdopen(fd, "w+b") as fd_file, zipfile.ZipFile( + fd_file, mode="w", compression=zipfile.ZIP_DEFLATED + ) as zip_file: + if self._editable: + self._build(zip_file) + self._add_pth(zip_file) + elif self._poetry.package.build_should_generate_setup(): + self._copy_module(zip_file) + self._build(zip_file) + else: + self._build(zip_file) + self._copy_module(zip_file) + + self._copy_file_scripts(zip_file) + + if self._metadata_directory is None: + with temporary_directory() as temp_dir: + metadata_directory = self.prepare_metadata(Path(temp_dir)) + self._copy_dist_info(zip_file, metadata_directory) + else: + self._copy_dist_info(zip_file, self._metadata_directory) + + self._write_record(zip_file) + + wheel_path = target_dir / self.wheel_filename + if wheel_path.exists(): + wheel_path.unlink() + shutil.move(temp_path, str(wheel_path)) + + logger.info(f"Built {self.wheel_filename}") + return wheel_path + + def _add_pth(self, wheel: zipfile.ZipFile) -> None: + paths = set() + for include in self._module.includes: + if isinstance(include, PackageInclude) and ( + include.is_module() or include.is_package() + ): + paths.add(include.base.resolve().as_posix()) + + content = "" + for path in paths: + content += path + os.linesep + + pth_file = Path(self._module.name).with_suffix(".pth") + + with self._write_to_zip(wheel, str(pth_file)) as f: + f.write(content) + + def _build(self, wheel: zipfile.ZipFile) -> None: + if self._package.build_script: + if not self._poetry.package.build_should_generate_setup(): + # Since we have a build script but no setup.py generation is required, + # we assume that the build script will build and copy the files + # directly. + # That way they will be picked up when adding files to the wheel. 
+ current_path = os.getcwd() + try: + os.chdir(str(self._path)) + self._run_build_script(self._package.build_script) + finally: + os.chdir(current_path) + else: + with SdistBuilder(poetry=self._poetry).setup_py() as setup: + # We need to place ourselves in the temporary + # directory in order to build the package + current_path = os.getcwd() + try: + os.chdir(str(self._path)) + self._run_build_command(setup) + finally: + os.chdir(current_path) + + build_dir = self._path / "build" + libs: list[Path] = list(build_dir.glob("lib.*")) + if not libs: + # The result of building the extensions + # does not exist, this may due to conditional + # builds, so we assume that it's okay + return + + lib = libs[0] + + for pkg in lib.glob("**/*"): + if pkg.is_dir() or self.is_excluded(pkg): + continue + + rel_path = str(pkg.relative_to(lib)) + + if rel_path in wheel.namelist(): + continue + + logger.debug(f"Adding: {rel_path}") + + self._add_file(wheel, pkg, rel_path) + + def _copy_file_scripts(self, wheel: zipfile.ZipFile) -> None: + file_scripts = self.convert_script_files() + + for abs_path in file_scripts: + self._add_file( + wheel, + abs_path, + Path.joinpath(Path(self.wheel_data_folder), "scripts", abs_path.name), + ) + + def _run_build_command(self, setup: Path) -> None: + subprocess.check_call( + [ + self.executable.as_posix(), + str(setup), + "build", + "-b", + str(self._path / "build"), + ] + ) + + def _run_build_script(self, build_script: str) -> None: + logger.debug(f"Executing build script: {build_script}") + subprocess.check_call([self.executable.as_posix(), build_script]) + + def _copy_module(self, wheel: zipfile.ZipFile) -> None: + to_add = self.find_files_to_add() + + # Walk the files and compress them, + # sorting everything so the order is stable. 
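+        # Illustrative example (hypothetical paths): an unordered set such as
+        #   {pkg/zeta.py, pkg/__init__.py, pkg/alpha.py}
+        # is always written as __init__.py, alpha.py, zeta.py, which helps keep
+        # the archive layout and the RECORD file reproducible across builds.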
+ for file in sorted(to_add, key=lambda x: x.path): + self._add_file(wheel, file.path, file.relative_to_source_root()) + + def prepare_metadata(self, metadata_directory: Path) -> Path: + dist_info = metadata_directory / self.dist_info + dist_info.mkdir(parents=True, exist_ok=True) + + if ( + "scripts" in self._poetry.local_config + or "plugins" in self._poetry.local_config + ): + with (dist_info / "entry_points.txt").open( + "w", encoding="utf-8", newline="\n" + ) as f: + self._write_entry_points(f) + + with (dist_info / "WHEEL").open("w", encoding="utf-8", newline="\n") as f: + self._write_wheel_file(f) + + with (dist_info / "METADATA").open("w", encoding="utf-8", newline="\n") as f: + self._write_metadata_file(f) + + license_files = set() + for base in ("COPYING", "LICENSE"): + license_files.add(self._path / base) + license_files.update(self._path.glob(base + ".*")) + + license_files.update(self._path.joinpath("LICENSES").glob("**/*")) + + for license_file in license_files: + if not license_file.is_file(): + logger.debug(f"Skipping: {license_file.as_posix()}") + continue + + dest = dist_info / license_file.relative_to(self._path) + os.makedirs(dest.parent, exist_ok=True) + shutil.copy(license_file, dest) + + return dist_info + + def _write_record(self, wheel: zipfile.ZipFile) -> None: + # Write a record of the files in the wheel + with self._write_to_zip(wheel, self.dist_info + "/RECORD") as f: + record = StringIO() + + csv_writer = csv.writer( + record, + delimiter=csv.excel.delimiter, + quotechar=csv.excel.quotechar, + lineterminator="\n", + ) + for path, hash, size in self._records: + csv_writer.writerow((path, f"sha256={hash}", size)) + + # RECORD itself is recorded with no hash or size + csv_writer.writerow((self.dist_info + "/RECORD", "", "")) + + f.write(record.getvalue()) + + def _copy_dist_info(self, wheel: zipfile.ZipFile, source: Path) -> None: + dist_info = Path(self.dist_info) + for file in source.glob("**/*"): + if not file.is_file(): + continue + + rel_path = file.relative_to(source) + target = dist_info / rel_path + self._add_file(wheel, file, target) + + @property + def dist_info(self) -> str: + return self.dist_info_name(self._package.name, self._meta.version) + + @property + def wheel_data_folder(self) -> str: + return f"{self._package.name}-{self._meta.version}.data" + + @property + def wheel_filename(self) -> str: + name = distribution_name(self._package.name) + version = self._meta.version + return f"{name}-{version}-{self.tag}.whl" + + def supports_python2(self) -> bool: + return self._package.python_constraint.allows_any( + parse_constraint(">=2.0.0 <3.0.0") + ) + + def dist_info_name(self, name: NormalizedName, version: str) -> str: + escaped_name = distribution_name(name) + return f"{escaped_name}-{version}.dist-info" + + @property + def tag(self) -> str: + if self._package.build_script: + sys_tag = next(sys_tags()) + tag = (sys_tag.interpreter, sys_tag.abi, sys_tag.platform) + else: + platform = "any" + if self.supports_python2(): + impl = "py2.py3" + else: + impl = "py3" + + tag = (impl, "none", platform) + + return "-".join(tag) + + def _add_file( + self, + wheel: zipfile.ZipFile, + full_path: Path | str, + rel_path: Path | str, + ) -> None: + full_path, rel_path = str(full_path), str(rel_path) + if os.sep != "/": + # We always want to have /-separated paths in the zip file and in + # RECORD + rel_path = rel_path.replace(os.sep, "/") + + zinfo = zipfile.ZipInfo(rel_path) + + # Normalize permission bits to either 755 (executable) or 644 + st_mode = 
os.stat(full_path).st_mode + new_mode = normalize_file_permissions(st_mode) + zinfo.external_attr = (new_mode & 0xFFFF) << 16 # Unix attributes + + if stat.S_ISDIR(st_mode): + zinfo.external_attr |= 0x10 # MS-DOS directory flag + + hashsum = hashlib.sha256() + with open(full_path, "rb") as src: + while True: + buf = src.read(1024 * 8) + if not buf: + break + hashsum.update(buf) + + src.seek(0) + wheel.writestr(zinfo, src.read(), compress_type=zipfile.ZIP_DEFLATED) + + size = os.stat(full_path).st_size + hash_digest = urlsafe_b64encode(hashsum.digest()).decode("ascii").rstrip("=") + + self._records.append((rel_path, hash_digest, size)) + + @contextlib.contextmanager + def _write_to_zip( + self, wheel: zipfile.ZipFile, rel_path: str + ) -> Iterator[StringIO]: + sio = StringIO() + yield sio + + # The default is a fixed timestamp rather than the current time, so + # that building a wheel twice on the same computer can automatically + # give you the exact same result. + date_time = (2016, 1, 1, 0, 0, 0) + zi = zipfile.ZipInfo(rel_path, date_time) + zi.external_attr = (0o644 & 0xFFFF) << 16 # Unix attributes + b = sio.getvalue().encode("utf-8") + hashsum = hashlib.sha256(b) + hash_digest = urlsafe_b64encode(hashsum.digest()).decode("ascii").rstrip("=") + + wheel.writestr(zi, b, compress_type=zipfile.ZIP_DEFLATED) + self._records.append((rel_path, hash_digest, len(b))) + + def _write_entry_points(self, fp: TextIO) -> None: + """ + Write entry_points.txt. + """ + entry_points = self.convert_entry_points() + + for group_name in sorted(entry_points): + fp.write(f"[{group_name}]\n") + for ep in sorted(entry_points[group_name]): + fp.write(ep.replace(" ", "") + "\n") + + fp.write("\n") + + def _write_wheel_file(self, fp: TextIO) -> None: + fp.write( + wheel_file_template.format( + version=__version__, + pure_lib="true" if self._package.build_script is None else "false", + tag=self.tag, + ) + ) + + def _write_metadata_file(self, fp: TextIO) -> None: + """ + Write out metadata in the 2.x format (email like) + """ + fp.write(self.get_metadata_content()) diff --git a/src/poetry/core/masonry/metadata.py b/src/poetry/core/masonry/metadata.py new file mode 100644 index 0000000..48ecae6 --- /dev/null +++ b/src/poetry/core/masonry/metadata.py @@ -0,0 +1,97 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from poetry.core.utils.helpers import readme_content_type + + +if TYPE_CHECKING: + from packaging.utils import NormalizedName + + from poetry.core.packages.package import Package + + +class Metadata: + metadata_version = "2.1" + # version 1.0 + name: NormalizedName | None = None + version: str + platforms: tuple[str, ...] = () + supported_platforms: tuple[str, ...] = () + summary: str | None = None + description: str | None = None + keywords: str | None = None + home_page: str | None = None + download_url: str | None = None + author: str | None = None + author_email: str | None = None + license: str | None = None + # version 1.1 + classifiers: tuple[str, ...] = () + requires: tuple[str, ...] = () + provides: tuple[str, ...] = () + obsoletes: tuple[str, ...] = () + # version 1.2 + maintainer: str | None = None + maintainer_email: str | None = None + requires_python: str | None = None + requires_external: tuple[str, ...] = () + requires_dist: list[str] = [] + provides_dist: tuple[str, ...] = () + obsoletes_dist: tuple[str, ...] = () + project_urls: tuple[str, ...] 
= () + + # Version 2.1 + description_content_type: str | None = None + provides_extra: list[str] = [] + + @classmethod + def from_package(cls, package: Package) -> Metadata: + from poetry.core.version.helpers import format_python_constraint + + meta = cls() + + meta.name = package.name + meta.version = package.version.to_string() + meta.summary = package.description + if package.readmes: + descriptions = [] + for readme in package.readmes: + with readme.open(encoding="utf-8") as f: + descriptions.append(f.read()) + meta.description = "\n".join(descriptions) + + meta.keywords = ",".join(package.keywords) + meta.home_page = package.homepage or package.repository_url + meta.author = package.author_name + meta.author_email = package.author_email + + if package.license: + meta.license = package.license.id + + meta.classifiers = tuple(package.all_classifiers) + + # Version 1.2 + meta.maintainer = package.maintainer_name + meta.maintainer_email = package.maintainer_email + + # Requires python + if package.python_versions != "*": + meta.requires_python = format_python_constraint(package.python_constraint) + + meta.requires_dist = [d.to_pep_508() for d in package.requires] + + # Version 2.1 + if package.readmes: + meta.description_content_type = readme_content_type(package.readmes[0]) + + meta.provides_extra = list(package.extras) + + if package.urls: + for name, url in package.urls.items(): + if name == "Homepage" and meta.home_page == url: + continue + + meta.project_urls += (f"{name}, {url}",) + + return meta diff --git a/src/poetry/core/masonry/utils/__init__.py b/src/poetry/core/masonry/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/masonry/utils/helpers.py b/src/poetry/core/masonry/utils/helpers.py new file mode 100644 index 0000000..cad1b4e --- /dev/null +++ b/src/poetry/core/masonry/utils/helpers.py @@ -0,0 +1,83 @@ +from __future__ import annotations + +import re +import warnings + +from typing import TYPE_CHECKING +from typing import NewType +from typing import cast + + +if TYPE_CHECKING: + from packaging.utils import NormalizedName + + +DistributionName = NewType("DistributionName", str) + + +def normalize_file_permissions(st_mode: int) -> int: + """ + Normalizes the permission bits in the st_mode field from stat to 644/755 + + Popular VCSs only track whether a file is executable or not. The exact + permissions can vary on systems with different umasks. Normalising + to 644 (non executable) or 755 (executable) makes builds more reproducible. + """ + # Set 644 permissions, leaving higher bits of st_mode unchanged + new_mode = (st_mode | 0o644) & ~0o133 + if st_mode & 0o100: + new_mode |= 0o111 # Executable: 644 -> 755 + + return new_mode + + +def escape_version(version: str) -> str: + """ + Escaped version in wheel filename. Doesn't exactly follow + the escaping specification in :pep:`427#escaping-and-unicode` + because this conflicts with :pep:`440#local-version-identifiers`. + """ + warnings.warn( + "escape_version() is deprecated. Use Version.parse().to_string() instead.", + DeprecationWarning, + stacklevel=2, + ) + return re.sub(r"[^\w\d.+]+", "_", version, flags=re.UNICODE) + + +def escape_name(name: str) -> str: + """ + Escaped wheel name as specified in https://packaging.python.org/en/latest/specifications/binary-distribution-format/#escaping-and-unicode. + This function should only be used for the generation of artifact names, and not to normalize or filter existing artifact names. + """ + warnings.warn( + "escape_name() is deprecated. 
Use packaging.utils.canonicalize_name() and" + " distribution_name() instead.", + DeprecationWarning, + stacklevel=2, + ) + return re.sub(r"[-_.]+", "_", name, flags=re.UNICODE).lower() + + +def distribution_name(name: NormalizedName) -> DistributionName: + """ + A normalized name, but with "-" replaced by "_". This is used in various places: + + https://packaging.python.org/en/latest/specifications/binary-distribution-format/#escaping-and-unicode + + In distribution names ... This is equivalent to PEP 503 normalisation followed by + replacing - with _. + + https://packaging.python.org/en/latest/specifications/source-distribution-format/#source-distribution-file-name + + ... {name} is normalised according to the same rules as for binary distributions + + https://packaging.python.org/en/latest/specifications/recording-installed-packages/#the-dist-info-directory + + This directory is named as {name}-{version}.dist-info, with name and version fields + corresponding to Core metadata specifications. Both fields must be normalized + (see PEP 503 and PEP 440 for the definition of normalization for each field + respectively), and replace dash (-) characters with underscore (_) characters ... + """ + distribution_name = name.replace("-", "_") + return cast("DistributionName", distribution_name) diff --git a/src/poetry/core/masonry/utils/include.py b/src/poetry/core/masonry/utils/include.py new file mode 100644 index 0000000..f183aa6 --- /dev/null +++ b/src/poetry/core/masonry/utils/include.py @@ -0,0 +1,51 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from pathlib import Path + + +class Include: + """ + Represents an "include" entry. + + It can be a glob string, a single file or a directory. + + This class will then detect the type of this include: + + - a package + - a module + - a file + - a directory + """ + + def __init__( + self, base: Path, include: str, formats: list[str] | None = None + ) -> None: + self._base = base + self._include = str(include) + self._formats = formats + + self._elements: list[Path] = sorted(self._base.glob(str(self._include))) + + @property + def base(self) -> Path: + return self._base + + @property + def elements(self) -> list[Path]: + return self._elements + + @property + def formats(self) -> list[str] | None: + return self._formats + + def is_empty(self) -> bool: + return len(self._elements) == 0 + + def refresh(self) -> Include: + self._elements = sorted(self._base.glob(self._include)) + + return self diff --git a/src/poetry/core/masonry/utils/module.py b/src/poetry/core/masonry/utils/module.py new file mode 100644 index 0000000..c97aefd --- /dev/null +++ b/src/poetry/core/masonry/utils/module.py @@ -0,0 +1,115 @@ +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Any + + +if TYPE_CHECKING: + from poetry.core.masonry.utils.include import Include + + +class ModuleOrPackageNotFound(ValueError): + pass + + +class Module: + def __init__( + self, + name: str, + directory: str = ".", + packages: list[dict[str, Any]] | None = None, + includes: list[dict[str, Any]] | None = None, + ) -> None: + from poetry.core.masonry.utils.include import Include + from poetry.core.masonry.utils.package_include import PackageInclude + from poetry.core.utils.helpers import module_name + + self._name = module_name(name) + self._in_src = False + self._is_package = False + self._path = Path(directory) + self._includes: list[Include] = [] + packages = packages or [] + 
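+        # Illustrative layouts probed below when no packages are declared
+        # explicitly (example names only):
+        #   ./my_module.py                        -> single-module project
+        #   ./my_module/                          -> package directory
+        #   ./src/my_module.py or ./src/my_module -> src layout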
includes = includes or [] + + if not packages: + # It must exist either as a .py file or a directory, but not both + pkg_dir = Path(directory, self._name) + py_file = Path(directory, self._name + ".py") + if pkg_dir.is_dir() and py_file.is_file(): + raise ValueError(f"Both {pkg_dir} and {py_file} exist") + elif pkg_dir.is_dir(): + packages = [{"include": str(pkg_dir.relative_to(self._path))}] + elif py_file.is_file(): + packages = [{"include": str(py_file.relative_to(self._path))}] + else: + # Searching for a src module + src = Path(directory, "src") + src_pkg_dir = src / self._name + src_py_file = src / (self._name + ".py") + + if src_pkg_dir.is_dir() and src_py_file.is_file(): + raise ValueError(f"Both {pkg_dir} and {py_file} exist") + elif src_pkg_dir.is_dir(): + packages = [ + { + "include": str(src_pkg_dir.relative_to(src)), + "from": str(src.relative_to(self._path)), + } + ] + elif src_py_file.is_file(): + packages = [ + { + "include": str(src_py_file.relative_to(src)), + "from": str(src.relative_to(self._path)), + } + ] + else: + raise ModuleOrPackageNotFound( + f"No file/folder found for package {name}" + ) + + for package in packages: + formats = package.get("format") + if formats and not isinstance(formats, list): + formats = [formats] + + self._includes.append( + PackageInclude( + self._path, + package["include"], + formats=formats, + source=package.get("from"), + ) + ) + + for include in includes: + self._includes.append( + Include(self._path, include["path"], formats=include["format"]) + ) + + @property + def name(self) -> str: + return self._name + + @property + def path(self) -> Path: + return self._path + + @property + def file(self) -> Path: + if self._is_package: + return self._path / "__init__.py" + else: + return self._path + + @property + def includes(self) -> list[Include]: + return self._includes + + def is_package(self) -> bool: + return self._is_package + + def is_in_src(self) -> bool: + return self._in_src diff --git a/src/poetry/core/masonry/utils/package_include.py b/src/poetry/core/masonry/utils/package_include.py new file mode 100644 index 0000000..643d02f --- /dev/null +++ b/src/poetry/core/masonry/utils/package_include.py @@ -0,0 +1,93 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from poetry.core.masonry.utils.include import Include + + +if TYPE_CHECKING: + from pathlib import Path + + +class PackageInclude(Include): + def __init__( + self, + base: Path, + include: str, + formats: list[str] | None = None, + source: str | None = None, + ) -> None: + self._package: str + self._is_package = False + self._is_module = False + self._source = source + + if source is not None: + base = base / source + + super().__init__(base, include, formats=formats) + self.check_elements() + + @property + def package(self) -> str: + return self._package + + @property + def source(self) -> str | None: + return self._source + + def is_package(self) -> bool: + return self._is_package + + def is_module(self) -> bool: + return self._is_module + + def refresh(self) -> PackageInclude: + super().refresh() + + return self.check_elements() + + def is_stub_only(self) -> bool: + # returns `True` if this a PEP 561 stub-only package, + # see [PEP 561](https://www.python.org/dev/peps/pep-0561/#stub-only-packages) + return (self.package or "").endswith("-stubs") and all( + el.suffix == ".pyi" or el.name == "py.typed" + for el in self.elements + if el.is_file() + ) + + def has_modules(self) -> bool: + # Packages no longer need an __init__.py in python3, but there 
must + # at least be one .py file for it to be considered a package + return any(element.suffix == ".py" for element in self.elements) + + def check_elements(self) -> PackageInclude: + if not self._elements: + raise ValueError( + f"{self._base / self._include} does not contain any element" + ) + + root = self._elements[0] + if len(self._elements) > 1: + # Probably glob + self._is_package = True + self._package = root.parent.name + + if not self.is_stub_only() and not self.has_modules(): + raise ValueError(f"{root.name} is not a package.") + + else: + if root.is_dir(): + # If it's a directory, we include everything inside it + self._package = root.name + self._elements: list[Path] = sorted(root.glob("**/*")) + + if not self.is_stub_only() and not self.has_modules(): + raise ValueError(f"{root.name} is not a package.") + + self._is_package = True + else: + self._package = root.stem + self._is_module = True + + return self diff --git a/src/poetry/core/packages/__init__.py b/src/poetry/core/packages/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/packages/constraints/__init__.py b/src/poetry/core/packages/constraints/__init__.py new file mode 100644 index 0000000..585f221 --- /dev/null +++ b/src/poetry/core/packages/constraints/__init__.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +import warnings + +from poetry.core.constraints.generic import AnyConstraint +from poetry.core.constraints.generic import BaseConstraint +from poetry.core.constraints.generic import Constraint +from poetry.core.constraints.generic import EmptyConstraint +from poetry.core.constraints.generic import MultiConstraint +from poetry.core.constraints.generic import UnionConstraint +from poetry.core.constraints.generic import parse_constraint +from poetry.core.constraints.generic.parser import parse_single_constraint + + +warnings.warn( + "poetry.core.packages.constraints is deprecated." 
+ " Use poetry.core.constraints.generic instead.", + DeprecationWarning, + stacklevel=2, +) + + +__all__ = [ + "AnyConstraint", + "BaseConstraint", + "Constraint", + "EmptyConstraint", + "MultiConstraint", + "UnionConstraint", + "parse_constraint", + "parse_single_constraint", +] diff --git a/src/poetry/core/packages/dependency.py b/src/poetry/core/packages/dependency.py new file mode 100644 index 0000000..b7e8f7f --- /dev/null +++ b/src/poetry/core/packages/dependency.py @@ -0,0 +1,530 @@ +from __future__ import annotations + +import os +import re +import warnings + +from contextlib import suppress +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Iterable +from typing import TypeVar + +from packaging.utils import canonicalize_name + +from poetry.core.constraints.generic import parse_constraint as parse_generic_constraint +from poetry.core.constraints.version import VersionRangeConstraint +from poetry.core.constraints.version import parse_constraint +from poetry.core.packages.dependency_group import MAIN_GROUP +from poetry.core.packages.specification import PackageSpecification +from poetry.core.packages.utils.utils import contains_group_without_marker +from poetry.core.packages.utils.utils import create_nested_marker +from poetry.core.packages.utils.utils import normalize_python_version_markers +from poetry.core.version.markers import parse_marker + + +if TYPE_CHECKING: + from packaging.utils import NormalizedName + + from poetry.core.constraints.version import VersionConstraint + from poetry.core.packages.directory_dependency import DirectoryDependency + from poetry.core.packages.file_dependency import FileDependency + from poetry.core.version.markers import BaseMarker + + T = TypeVar("T", bound="Dependency") + + +class Dependency(PackageSpecification): + def __init__( + self, + name: str, + constraint: str | VersionConstraint, + optional: bool = False, + groups: Iterable[str] | None = None, + allows_prereleases: bool = False, + extras: Iterable[str] | None = None, + source_type: str | None = None, + source_url: str | None = None, + source_reference: str | None = None, + source_resolved_reference: str | None = None, + source_subdirectory: str | None = None, + ) -> None: + from poetry.core.version.markers import AnyMarker + + super().__init__( + name, + source_type=source_type, + source_url=source_url, + source_reference=source_reference, + source_resolved_reference=source_resolved_reference, + source_subdirectory=source_subdirectory, + features=extras, + ) + + self._constraint: VersionConstraint + self._pretty_constraint: str + self.constraint = constraint # type: ignore[assignment] + + self._optional = optional + + if not groups: + groups = [MAIN_GROUP] + + self._groups = frozenset(groups) + + if ( + isinstance(self._constraint, VersionRangeConstraint) + and self._constraint.min + ): + allows_prereleases = ( + allows_prereleases or self._constraint.min.is_unstable() + ) + + self._allows_prereleases = allows_prereleases + + self._python_versions = "*" + self._python_constraint = parse_constraint("*") + self._transitive_python_versions: str | None = None + self._transitive_python_constraint: VersionConstraint | None = None + self._transitive_marker: BaseMarker | None = None + + self._in_extras: list[NormalizedName] = [] + + self._activated = not self._optional + + self.is_root = False + self._marker: BaseMarker = AnyMarker() + self.source_name: str | None = None + + @property + def name(self) -> NormalizedName: + return self._name + + @property + def 
constraint(self) -> VersionConstraint: + return self._constraint + + @constraint.setter + def constraint(self, constraint: str | VersionConstraint) -> None: + if isinstance(constraint, str): + self._constraint = parse_constraint(constraint) + else: + self._constraint = constraint + + self._pretty_constraint = str(constraint) + + def set_constraint(self, constraint: str | VersionConstraint) -> None: + warnings.warn( + "Calling method 'set_constraint' is deprecated and will be removed. " + "It has been replaced by the property 'constraint' for consistency.", + DeprecationWarning, + stacklevel=2, + ) + self.constraint = constraint # type: ignore[assignment] + + @property + def pretty_constraint(self) -> str: + return self._pretty_constraint + + @property + def pretty_name(self) -> str: + return self._pretty_name + + @property + def groups(self) -> frozenset[str]: + return self._groups + + @property + def python_versions(self) -> str: + return self._python_versions + + @python_versions.setter + def python_versions(self, value: str) -> None: + self._python_versions = value + self._python_constraint = parse_constraint(value) + if not self._python_constraint.is_any(): + self._marker = self._marker.intersect( + parse_marker( + create_nested_marker("python_version", self._python_constraint) + ) + ) + + @property + def transitive_python_versions(self) -> str: + if self._transitive_python_versions is None: + return self._python_versions + + return self._transitive_python_versions + + @transitive_python_versions.setter + def transitive_python_versions(self, value: str) -> None: + self._transitive_python_versions = value + self._transitive_python_constraint = parse_constraint(value) + + @property + def marker(self) -> BaseMarker: + return self._marker + + @marker.setter + def marker(self, marker: str | BaseMarker) -> None: + from poetry.core.constraints.version import parse_constraint + from poetry.core.packages.utils.utils import convert_markers + from poetry.core.version.markers import BaseMarker + from poetry.core.version.markers import parse_marker + + if not isinstance(marker, BaseMarker): + marker = parse_marker(marker) + + self._marker = marker + + markers = convert_markers(marker) + + if "extra" in markers: + # If we have extras, the dependency is optional + self.deactivate() + + for or_ in markers["extra"]: + for _, extra in or_: + self.in_extras.append(canonicalize_name(extra)) + + # Recalculate python versions. 
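+        # Illustrative example (hypothetical marker): a marker such as
+        #   python_version >= "3.8" and python_version < "3.11"
+        # is folded back into an equivalent python version constraint string,
+        # which is then re-parsed into the python constraint below.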
+ self._python_versions = "*" + if not contains_group_without_marker(markers, "python_version"): + python_version_markers = markers["python_version"] + self._python_versions = normalize_python_version_markers( + python_version_markers + ) + + self._python_constraint = parse_constraint(self._python_versions) + + @property + def transitive_marker(self) -> BaseMarker: + if self._transitive_marker is None: + return self.marker + + return self._transitive_marker + + @transitive_marker.setter + def transitive_marker(self, value: BaseMarker) -> None: + self._transitive_marker = value + + @property + def python_constraint(self) -> VersionConstraint: + return self._python_constraint + + @property + def transitive_python_constraint(self) -> VersionConstraint: + if self._transitive_python_constraint is None: + return self._python_constraint + + return self._transitive_python_constraint + + @property + def extras(self) -> frozenset[NormalizedName]: + # extras activated in a dependency is the same as features + return self._features + + @property + def in_extras(self) -> list[NormalizedName]: + return self._in_extras + + @property + def base_pep_508_name(self) -> str: + from poetry.core.constraints.version import Version + from poetry.core.constraints.version import VersionUnion + + requirement = self.pretty_name + + if self.extras: + extras = ",".join(sorted(self.extras)) + requirement += f"[{extras}]" + + constraint = self.constraint + if isinstance(constraint, VersionUnion): + if ( + constraint.excludes_single_version() + or constraint.excludes_single_wildcard_range() + ): + # This branch is a short-circuit logic for special cases and + # avoids having to split and parse constraint again. This has + # no functional difference with the logic in the else branch. 
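+                # Illustrative example (hypothetical constraints): unions such
+                # as "!=1.2.3" or "!=1.2.*" take this branch and are emitted
+                # as-is, e.g. "foo (!=1.2.3)", instead of being re-split and
+                # re-parsed from the pretty constraint.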
+ requirement += f" ({str(constraint)})" + else: + constraints = ",".join( + str(parse_constraint(c)) for c in self.pretty_constraint.split(",") + ) + requirement += f" ({constraints})" + elif isinstance(constraint, Version): + requirement += f" (=={constraint.text})" + elif not constraint.is_any(): + requirement += f" ({str(constraint).replace(' ', '')})" + + return requirement + + def allows_prereleases(self) -> bool: + return self._allows_prereleases + + def is_optional(self) -> bool: + return self._optional + + def is_activated(self) -> bool: + return self._activated + + def is_vcs(self) -> bool: + return False + + def is_file(self) -> bool: + return False + + def is_directory(self) -> bool: + return False + + def is_url(self) -> bool: + return False + + def to_pep_508(self, with_extras: bool = True) -> str: + from poetry.core.packages.utils.utils import convert_markers + + requirement = self.base_pep_508_name + + markers = [] + has_extras = False + if not self.marker.is_any(): + marker = self.marker + if not with_extras: + marker = marker.without_extras() + + # we re-check for any marker here since the without extra marker might + # return an any marker again + if not marker.is_empty() and not marker.is_any(): + markers.append(str(marker)) + + has_extras = "extra" in convert_markers(marker) + else: + # Python marker + if self.python_versions != "*": + python_constraint = self.python_constraint + + markers.append( + create_nested_marker("python_version", python_constraint) + ) + + in_extras = " || ".join(self._in_extras) + if in_extras and with_extras and not has_extras: + markers.append( + create_nested_marker("extra", parse_generic_constraint(in_extras)) + ) + + if markers: + if len(markers) > 1: + marker_str = " and ".join(f"({m})" for m in markers) + else: + marker_str = markers[0] + requirement += f" ; {marker_str}" + + return requirement + + def activate(self) -> None: + """ + Set the dependency as mandatory. + """ + self._activated = True + + def deactivate(self) -> None: + """ + Set the dependency as optional. + """ + if not self._optional: + self._optional = True + + self._activated = False + + def with_constraint(self: T, constraint: str | VersionConstraint) -> T: + dependency = self.clone() + dependency.constraint = constraint # type: ignore[assignment] + return dependency + + @classmethod + def create_from_pep_508( + cls, name: str, relative_to: Path | None = None + ) -> Dependency: + """ + Resolve a PEP-508 requirement string to a `Dependency` instance. If a `relative_to` + path is specified, this is used as the base directory if the identified dependency is + of file or directory type. 
+ """ + from poetry.core.packages.url_dependency import URLDependency + from poetry.core.packages.utils.link import Link + from poetry.core.packages.utils.utils import is_archive_file + from poetry.core.packages.utils.utils import is_python_project + from poetry.core.packages.utils.utils import is_url + from poetry.core.packages.utils.utils import path_to_url + from poetry.core.packages.utils.utils import strip_extras + from poetry.core.packages.utils.utils import url_to_path + from poetry.core.packages.vcs_dependency import VCSDependency + from poetry.core.utils.patterns import wheel_file_re + from poetry.core.vcs.git import ParsedUrl + from poetry.core.version.requirements import Requirement + + # Removing comments + parts = name.split(" #", 1) + name = parts[0].strip() + if len(parts) > 1: + rest = parts[1] + if " ;" in rest: + name += " ;" + rest.split(" ;", 1)[1] + + req = Requirement(name) + + name = req.name + link = None + + if is_url(name): + link = Link(name) + elif req.url: + link = Link(req.url) + else: + path_str = os.path.normpath(os.path.abspath(name)) + p, extras = strip_extras(path_str) + if os.path.isdir(p) and (os.path.sep in name or name.startswith(".")): + if not is_python_project(Path(name)): + raise ValueError( + f"Directory {name!r} is not installable. File 'setup.[py|cfg]' " + "not found." + ) + link = Link(path_to_url(p)) + elif is_archive_file(p): + link = Link(path_to_url(p)) + + # it's a local file, dir, or url + if link: + is_file_uri = link.scheme == "file" + is_relative_uri = is_file_uri and re.search(r"\.\./", link.url) + + # Handle relative file URLs + if is_file_uri and is_relative_uri: + path = Path(link.path) + if relative_to: + path = relative_to / path + link = Link(path_to_url(path)) + + # wheel file + version = None + if link.is_wheel: + m = wheel_file_re.match(link.filename) + if not m: + raise ValueError(f"Invalid wheel name: {link.filename}") + name = m.group("name") + version = m.group("ver") + + dep: Dependency | None = None + + if link.scheme.startswith("git+"): + url = ParsedUrl.parse(link.url) + dep = VCSDependency( + name, + "git", + url.url, + rev=url.rev, + directory=url.subdirectory, + extras=req.extras, + ) + elif link.scheme == "git": + dep = VCSDependency( + name, "git", link.url_without_fragment, extras=req.extras + ) + elif link.scheme in ["http", "https"]: + dep = URLDependency( + name, + link.url_without_fragment, + directory=link.subdirectory_fragment, + extras=req.extras, + ) + elif is_file_uri: + # handle RFC 8089 references + path = url_to_path(req.url) + dep = _make_file_or_dir_dep( + name=name, path=path, base=relative_to, extras=req.extras + ) + else: + with suppress(ValueError): + # this is a local path not using the file URI scheme + dep = _make_file_or_dir_dep( + name=name, + path=Path(req.url), + base=relative_to, + extras=req.extras, + ) + + if dep is None: + dep = Dependency(name, version or "*", extras=req.extras) + + if version: + dep._constraint = parse_constraint(version) + else: + constraint: VersionConstraint | str + if req.pretty_constraint: + constraint = req.constraint + else: + constraint = "*" + dep = Dependency(name, constraint, extras=req.extras) + + if req.marker: + dep.marker = req.marker + + return dep + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Dependency): + return NotImplemented + + # "constraint" is implicitly given for direct origin dependencies and might not + # be set yet ("*"). Thus, it shouldn't be used to determine if two direct origin + # dependencies are equal. 
# Calling is_direct_origin() for one dependency is sufficient because + # super().__eq__() returns False for different origins. + return super().__eq__(other) and ( + self._constraint == other.constraint or self.is_direct_origin() + ) + + def __hash__(self) -> int: + # don't include _constraint in hash because it is mutable! + return super().__hash__() + + def __str__(self) -> str: + if self.is_root: + return self._pretty_name + if self.is_direct_origin(): + # adding version since this information is especially useful in debug output + parts = [p.strip() for p in self.base_pep_508_name.split("@", 1)] + return f"{parts[0]} ({self._pretty_constraint}) @ {parts[1]}" + return self.base_pep_508_name + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {str(self)}>" + + +def _make_file_or_dir_dep( + name: str, + path: Path, + base: Path | None = None, + extras: list[str] | None = None, +) -> FileDependency | DirectoryDependency | None: + """ + Helper function to create a file or directory dependency with the given arguments. If + path is not a file or directory that exists, `None` is returned. + """ + from poetry.core.packages.directory_dependency import DirectoryDependency + from poetry.core.packages.file_dependency import FileDependency + + _path = path + if not path.is_absolute() and base: + # a base path was specified, so we should respect that + _path = Path(base) / path + + if _path.is_file(): + return FileDependency(name, path, base=base, extras=extras) + elif _path.is_dir(): + return DirectoryDependency(name, path, base=base, extras=extras) + + return None diff --git a/src/poetry/core/packages/dependency_group.py b/src/poetry/core/packages/dependency_group.py new file mode 100644 index 0000000..9afa692 --- /dev/null +++ b/src/poetry/core/packages/dependency_group.py @@ -0,0 +1,57 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from poetry.core.packages.dependency import Dependency + + +MAIN_GROUP = "main" + + +class DependencyGroup: + def __init__(self, name: str, optional: bool = False) -> None: + self._name: str = name + self._optional: bool = optional + self._dependencies: list[Dependency] = [] + + @property + def name(self) -> str: + return self._name + + @property + def dependencies(self) -> list[Dependency]: + return self._dependencies + + def is_optional(self) -> bool: + return self._optional + + def add_dependency(self, dependency: Dependency) -> None: + self._dependencies.append(dependency) + + def remove_dependency(self, name: str) -> None: + from packaging.utils import canonicalize_name + + name = canonicalize_name(name) + + dependencies = [] + for dependency in self.dependencies: + if dependency.name == name: + continue + + dependencies.append(dependency) + + self._dependencies = dependencies + + def __eq__(self, other: object) -> bool: + if not isinstance(other, DependencyGroup): + return NotImplemented + + return self._name == other.name and set(self._dependencies) == set( + other.dependencies + ) + + def __repr__(self) -> str: + cls = self.__class__.__name__ + return f"{cls}({self._name}, optional={self._optional})" diff --git a/src/poetry/core/packages/directory_dependency.py b/src/poetry/core/packages/directory_dependency.py new file mode 100644 index 0000000..5c882af --- /dev/null +++ b/src/poetry/core/packages/directory_dependency.py @@ -0,0 +1,95 @@ +from __future__ import annotations + +import functools + +from pathlib import Path +from typing import Iterable + +from poetry.core.packages.dependency
import Dependency +from poetry.core.packages.utils.utils import is_python_project +from poetry.core.packages.utils.utils import path_to_url +from poetry.core.pyproject.toml import PyProjectTOML + + +class DirectoryDependency(Dependency): + def __init__( + self, + name: str, + path: Path, + groups: Iterable[str] | None = None, + optional: bool = False, + base: Path | None = None, + develop: bool = False, + extras: Iterable[str] | None = None, + ) -> None: + self._path = path + self._base = base or Path.cwd() + self._full_path = path + + if not self._path.is_absolute(): + try: + self._full_path = self._base.joinpath(self._path).resolve() + except FileNotFoundError: + raise ValueError(f"Directory {self._path} does not exist") + + self._develop = develop + + if not self._full_path.exists(): + raise ValueError(f"Directory {self._path} does not exist") + + if self._full_path.is_file(): + raise ValueError(f"{self._path} is a file, expected a directory") + + if not is_python_project(self._full_path): + raise ValueError( + f"Directory {self._full_path} does not seem to be a Python package" + ) + + super().__init__( + name, + "*", + groups=groups, + optional=optional, + allows_prereleases=True, + source_type="directory", + source_url=self._full_path.as_posix(), + extras=extras, + ) + + # cache this function to avoid multiple IO reads and parsing + self.supports_poetry = functools.lru_cache(maxsize=1)(self._supports_poetry) + + @property + def path(self) -> Path: + return self._path + + @property + def full_path(self) -> Path: + return self._full_path + + @property + def base(self) -> Path: + return self._base + + @property + def develop(self) -> bool: + return self._develop + + def _supports_poetry(self) -> bool: + return PyProjectTOML(self._full_path / "pyproject.toml").is_poetry_project() + + def is_directory(self) -> bool: + return True + + @property + def base_pep_508_name(self) -> str: + requirement = self.pretty_name + + if self.extras: + extras = ",".join(sorted(self.extras)) + requirement += f"[{extras}]" + + path = path_to_url(self.full_path) + requirement += f" @ {path}" + + return requirement diff --git a/src/poetry/core/packages/file_dependency.py b/src/poetry/core/packages/file_dependency.py new file mode 100644 index 0000000..2b71000 --- /dev/null +++ b/src/poetry/core/packages/file_dependency.py @@ -0,0 +1,84 @@ +from __future__ import annotations + +import hashlib +import io + +from pathlib import Path +from typing import Iterable + +from poetry.core.packages.dependency import Dependency +from poetry.core.packages.utils.utils import path_to_url + + +class FileDependency(Dependency): + def __init__( + self, + name: str, + path: Path, + groups: Iterable[str] | None = None, + optional: bool = False, + base: Path | None = None, + extras: Iterable[str] | None = None, + ) -> None: + self._path = path + self._base = base or Path.cwd() + self._full_path = path + + if not self._path.is_absolute(): + try: + self._full_path = self._base.joinpath(self._path).resolve() + except FileNotFoundError: + raise ValueError(f"Directory {self._path} does not exist") + + if not self._full_path.exists(): + raise ValueError(f"File {self._path} does not exist") + + if self._full_path.is_dir(): + raise ValueError(f"{self._path} is a directory, expected a file") + + super().__init__( + name, + "*", + groups=groups, + optional=optional, + allows_prereleases=True, + source_type="file", + source_url=self._full_path.as_posix(), + extras=extras, + ) + + @property + def base(self) -> Path: + return self._base + + 
@property + def path(self) -> Path: + return self._path + + @property + def full_path(self) -> Path: + return self._full_path + + def is_file(self) -> bool: + return True + + def hash(self, hash_name: str = "sha256") -> str: + h = hashlib.new(hash_name) + with self._full_path.open("rb") as fp: + for content in iter(lambda: fp.read(io.DEFAULT_BUFFER_SIZE), b""): + h.update(content) + + return h.hexdigest() + + @property + def base_pep_508_name(self) -> str: + requirement = self.pretty_name + + if self.extras: + extras = ",".join(sorted(self.extras)) + requirement += f"[{extras}]" + + path = path_to_url(self.full_path) + requirement += f" @ {path}" + + return requirement diff --git a/src/poetry/core/packages/package.py b/src/poetry/core/packages/package.py new file mode 100644 index 0000000..1651ca6 --- /dev/null +++ b/src/poetry/core/packages/package.py @@ -0,0 +1,645 @@ +from __future__ import annotations + +import copy +import re + +from contextlib import contextmanager +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Collection +from typing import Iterable +from typing import Iterator +from typing import TypeVar + +from poetry.core.constraints.version import parse_constraint +from poetry.core.constraints.version.exceptions import ParseConstraintError +from poetry.core.packages.dependency_group import MAIN_GROUP +from poetry.core.packages.specification import PackageSpecification +from poetry.core.packages.utils.utils import create_nested_marker +from poetry.core.version.exceptions import InvalidVersion +from poetry.core.version.markers import parse_marker + + +if TYPE_CHECKING: + from packaging.utils import NormalizedName + + from poetry.core.constraints.version import Version + from poetry.core.constraints.version import VersionConstraint + from poetry.core.packages.dependency import Dependency + from poetry.core.packages.dependency_group import DependencyGroup + from poetry.core.spdx.license import License + from poetry.core.version.markers import BaseMarker + + T = TypeVar("T", bound="Package") + +AUTHOR_REGEX = re.compile(r"(?u)^(?P<name>[- .,\w\d'’\"():&]+)(?: <(?P<email>.+?)>)?$") + + +class Package(PackageSpecification): + AVAILABLE_PYTHONS = { + "2", + "2.7", + "3", + "3.4", + "3.5", + "3.6", + "3.7", + "3.8", + "3.9", + "3.10", + "3.11", + } + + def __init__( + self, + name: str, + version: str | Version, + pretty_version: str | None = None, + source_type: str | None = None, + source_url: str | None = None, + source_reference: str | None = None, + source_resolved_reference: str | None = None, + source_subdirectory: str | None = None, + features: Iterable[str] | None = None, + develop: bool = False, + yanked: str | bool = False, + ) -> None: + """ + Creates a new in memory package. + """ + from poetry.core.version.markers import AnyMarker + + super().__init__( + name, + source_type=source_type, + source_url=source_url, + source_reference=source_reference, + source_resolved_reference=source_resolved_reference, + source_subdirectory=source_subdirectory, + features=features, + ) + + self._set_version(version, pretty_version) + + self.description = "" + + self._authors: list[str] = [] + self._maintainers: list[str] = [] + + self.homepage: str | None = None + self.repository_url: str | None = None + self.documentation_url: str | None = None + self.keywords: list[str] = [] + self._license: License | None = None + self.readmes: tuple[Path, ...]
= () + + self.extras: dict[NormalizedName, list[Dependency]] = {} + + self._dependency_groups: dict[str, DependencyGroup] = {} + + # For compatibility with previous version, we keep the category + self.category = "main" + self.files: list[dict[str, str]] = [] + self.optional = False + + self.classifiers: list[str] = [] + + self._python_versions = "*" + self._python_constraint = parse_constraint("*") + self._python_marker: BaseMarker = AnyMarker() + + self.platform = None + self.marker: BaseMarker = AnyMarker() + + self.root_dir: Path | None = None + + self.develop = develop + + self._yanked = yanked + + @property + def name(self) -> NormalizedName: + return self._name + + @property + def pretty_name(self) -> str: + return self._pretty_name + + @property + def version(self) -> Version: + return self._version + + @property + def pretty_version(self) -> str: + return self._pretty_version + + @property + def unique_name(self) -> str: + if self.is_root(): + return self._name + + return self.complete_name + "-" + self._version.text + + @property + def pretty_string(self) -> str: + return self.pretty_name + " " + self.pretty_version + + @property + def full_pretty_version(self) -> str: + if self.source_type in ["file", "directory", "url"]: + return f"{self._pretty_version} {self.source_url}" + + if self.source_type not in ["hg", "git"]: + return self._pretty_version + + ref: str | None + if self.source_resolved_reference and len(self.source_resolved_reference) == 40: + ref = self.source_resolved_reference[0:7] + return f"{self._pretty_version} {ref}" + + # if source reference is a sha1 hash -- truncate + if self.source_reference and len(self.source_reference) == 40: + return f"{self._pretty_version} {self.source_reference[0:7]}" + + ref = self._source_resolved_reference or self._source_reference + return f"{self._pretty_version} {ref}" + + @property + def authors(self) -> list[str]: + return self._authors + + @property + def author_name(self) -> str | None: + return self._get_author()["name"] + + @property + def author_email(self) -> str | None: + return self._get_author()["email"] + + @property + def maintainers(self) -> list[str]: + return self._maintainers + + @property + def maintainer_name(self) -> str | None: + return self._get_maintainer()["name"] + + @property + def maintainer_email(self) -> str | None: + return self._get_maintainer()["email"] + + @property + def requires(self) -> list[Dependency]: + """ + Returns the main dependencies + """ + if not self._dependency_groups or MAIN_GROUP not in self._dependency_groups: + return [] + + return self._dependency_groups[MAIN_GROUP].dependencies + + @property + def all_requires( + self, + ) -> list[Dependency]: + """ + Returns the main dependencies and group dependencies. + """ + return [ + dependency + for group in self._dependency_groups.values() + for dependency in group.dependencies + ] + + def _set_version( + self, version: str | Version, pretty_version: str | None = None + ) -> None: + from poetry.core.constraints.version import Version + + if not isinstance(version, Version): + try: + version = Version.parse(version) + except InvalidVersion: + raise InvalidVersion( + f"Invalid version '{version}' on package {self.name}" + ) + + self._version = version + self._pretty_version = pretty_version or version.text + + def _get_author(self) -> dict[str, str | None]: + if not self._authors: + return {"name": None, "email": None} + + m = AUTHOR_REGEX.match(self._authors[0]) + + if m is None: + raise ValueError( + "Invalid author string. 
Must be in the format: " + "John Smith <john@example.com>" + ) + + name = m.group("name") + email = m.group("email") + + return {"name": name, "email": email} + + def _get_maintainer(self) -> dict[str, str | None]: + if not self._maintainers: + return {"name": None, "email": None} + + m = AUTHOR_REGEX.match(self._maintainers[0]) + + if m is None: + raise ValueError( + "Invalid maintainer string. Must be in the format: " + "John Smith <john@example.com>" + ) + + name = m.group("name") + email = m.group("email") + + return {"name": name, "email": email} + + @property + def python_versions(self) -> str: + return self._python_versions + + @python_versions.setter + def python_versions(self, value: str) -> None: + try: + constraint = parse_constraint(value) + except ParseConstraintError: + raise ParseConstraintError(f"Invalid python versions '{value}' on {self}") + + self._python_versions = value + self._python_constraint = constraint + self._python_marker = parse_marker( + create_nested_marker("python_version", self._python_constraint) + ) + + @property + def python_constraint(self) -> VersionConstraint: + return self._python_constraint + + @property + def python_marker(self) -> BaseMarker: + return self._python_marker + + @property + def license(self) -> License | None: + return self._license + + @license.setter + def license(self, value: str | License | None) -> None: + from poetry.core.spdx.helpers import license_by_id + from poetry.core.spdx.license import License + + if value is None or isinstance(value, License): + self._license = value + else: + self._license = license_by_id(value) + + @property + def all_classifiers(self) -> list[str]: + from poetry.core.constraints.version import Version + + classifiers = copy.copy(self.classifiers) + + # Automatically set python classifiers + if self.python_versions == "*": + python_constraint = parse_constraint("~2.7 || ^3.4") + else: + python_constraint = self.python_constraint + + python_classifier_prefix = "Programming Language :: Python" + python_classifiers = [] + + # we sort python versions by sorting an int tuple of (major, minor) version + # to ensure we sort 3.10 after 3.9 + for version in sorted( + self.AVAILABLE_PYTHONS, key=lambda x: tuple(map(int, x.split("."))) + ): + if len(version) == 1: + constraint = parse_constraint(version + ".*") + else: + constraint = Version.parse(version) + + if python_constraint.allows_any(constraint): + classifier = f"{python_classifier_prefix} :: {version}" + if classifier not in python_classifiers: + python_classifiers.append(classifier) + + # Automatically set license classifiers + if self.license: + classifiers.append(self.license.classifier) + + # Sort classifiers and insert python classifiers at the right location. We do + # it like this so that 3.10 is sorted after 3.9.
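+        # Illustrative example (hypothetical classifier set): a plain
+        # lexicographic sort would place "... :: 3.10" before "... :: 3.9";
+        # inserting the numerically pre-sorted python classifiers as a single
+        # block below keeps 3.9 ahead of 3.10.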
+ sorted_classifiers = [] + python_classifiers_inserted = False + for classifier in sorted(set(classifiers)): + if ( + not python_classifiers_inserted + and classifier > python_classifier_prefix + ): + sorted_classifiers.extend(python_classifiers) + python_classifiers_inserted = True + sorted_classifiers.append(classifier) + + if not python_classifiers_inserted: + sorted_classifiers.extend(python_classifiers) + + return sorted_classifiers + + @property + def urls(self) -> dict[str, str]: + urls = {} + + if self.homepage: + urls["Homepage"] = self.homepage + + if self.repository_url: + urls["Repository"] = self.repository_url + + if self.documentation_url: + urls["Documentation"] = self.documentation_url + + return urls + + @property + def readme(self) -> Path | None: + import warnings + + warnings.warn( + "`readme` is deprecated: you are getting only the first readme file. Please" + " use the plural form `readmes`.", + DeprecationWarning, + ) + return next(iter(self.readmes), None) + + @readme.setter + def readme(self, path: Path) -> None: + import warnings + + warnings.warn( + "`readme` is deprecated. Please assign a tuple to the plural form" + " `readmes`.", + DeprecationWarning, + ) + self.readmes = (path,) + + @property + def yanked(self) -> bool: + return isinstance(self._yanked, str) or bool(self._yanked) + + @property + def yanked_reason(self) -> str: + if isinstance(self._yanked, str): + return self._yanked + return "" + + def is_prerelease(self) -> bool: + return self._version.is_unstable() + + def is_root(self) -> bool: + return False + + def dependency_group_names(self, include_optional: bool = False) -> set[str]: + return { + name + for name, group in self._dependency_groups.items() + if not group.is_optional() or include_optional + } + + def add_dependency_group(self, group: DependencyGroup) -> None: + self._dependency_groups[group.name] = group + + def has_dependency_group(self, name: str) -> bool: + return name in self._dependency_groups + + def dependency_group(self, name: str) -> DependencyGroup: + if not self.has_dependency_group(name): + raise ValueError(f'The dependency group "{name}" does not exist.') + + return self._dependency_groups[name] + + def add_dependency( + self, + dependency: Dependency, + ) -> Dependency: + from poetry.core.packages.dependency_group import DependencyGroup + + for group_name in dependency.groups: + if group_name not in self._dependency_groups: + # Dynamically add the dependency group + self.add_dependency_group(DependencyGroup(group_name)) + + self._dependency_groups[group_name].add_dependency(dependency) + + return dependency + + def without_dependency_groups(self: T, groups: Collection[str]) -> T: + """ + Returns a clone of the package with the given dependency groups excluded. + """ + package = self.clone() + + for group_name in groups: + if group_name in package._dependency_groups: + del package._dependency_groups[group_name] + + return package + + def without_optional_dependency_groups(self: T) -> T: + """ + Returns a clone of the package without optional dependency groups. + """ + package = self.clone() + + for group_name, group in self._dependency_groups.items(): + if group.is_optional(): + del package._dependency_groups[group_name] + + return package + + def with_dependency_groups( + self: T, groups: Collection[str], only: bool = False + ) -> T: + """ + Returns a clone of the package with the given dependency groups opted in. + + Note that it will return all dependencies across all groups + more the given, optional, groups. 
+ + If `only` is set to True, then only the given groups will be selected. + """ + package = self.clone() + + for group_name, group in self._dependency_groups.items(): + if (only or group.is_optional()) and group_name not in groups: + del package._dependency_groups[group_name] + + return package + + def to_dependency(self) -> Dependency: + from pathlib import Path + + from poetry.core.packages.dependency import Dependency + from poetry.core.packages.directory_dependency import DirectoryDependency + from poetry.core.packages.file_dependency import FileDependency + from poetry.core.packages.url_dependency import URLDependency + from poetry.core.packages.vcs_dependency import VCSDependency + + dep: Dependency + if self.source_type == "directory": + assert self._source_url is not None + dep = DirectoryDependency( + self._name, + Path(self._source_url), + groups=list(self._dependency_groups.keys()), + optional=self.optional, + base=self.root_dir, + develop=self.develop, + extras=self.features, + ) + elif self.source_type == "file": + assert self._source_url is not None + dep = FileDependency( + self._name, + Path(self._source_url), + groups=list(self._dependency_groups.keys()), + optional=self.optional, + base=self.root_dir, + extras=self.features, + ) + elif self.source_type == "url": + assert self._source_url is not None + dep = URLDependency( + self._name, + self._source_url, + directory=self.source_subdirectory, + groups=list(self._dependency_groups.keys()), + optional=self.optional, + extras=self.features, + ) + elif self.source_type == "git": + assert self._source_url is not None + dep = VCSDependency( + self._name, + self.source_type, + self._source_url, + rev=self.source_reference, + resolved_rev=self.source_resolved_reference, + directory=self.source_subdirectory, + groups=list(self._dependency_groups.keys()), + optional=self.optional, + develop=self.develop, + extras=self.features, + ) + else: + dep = Dependency(self._name, self._version, extras=self.features) + + if not self.marker.is_any(): + dep.marker = self.marker + + if not self.python_constraint.is_any(): + dep.python_versions = self.python_versions + + if not self.is_direct_origin(): + return dep + + return dep.with_constraint(self._version) + + @contextmanager + def with_python_versions(self, python_versions: str) -> Iterator[None]: + original_python_versions = self.python_versions + + self.python_versions = python_versions + + yield + + self.python_versions = original_python_versions + + def satisfies( + self, dependency: Dependency, ignore_source_type: bool = False + ) -> bool: + """ + Helper method to check if this package satisfies a given dependency. + + This is determined by assessing if this instance provides the package specified + by the given dependency. Further, version and source types are checked. + """ + if self.name != dependency.name: + return False + + if not dependency.constraint.allows(self.version): + return False + + if not ignore_source_type and not self.source_satisfies(dependency): + return False + + return True + + def source_satisfies(self, dependency: Dependency) -> bool: + """Determine whether this package's source satisfies the given dependency.""" + if dependency.source_type is None: + if dependency.source_name is None: + # The dependency doesn't care about the source, so this package + # certainly satisfies it. + return True + + # The dependency specifies a source_name but not a type: it wants either + # pypi or a legacy repository. 
+ # + # - If this package has no source type then it's from pypi, so it + # matches if and only if that's what the dependency wants + # - Else this package is a match if and only if it is from the desired + # repository + if self.source_type is None: + return dependency.source_name.lower() == "pypi" + + return ( + self.source_type == "legacy" + and self.source_reference is not None + and self.source_reference.lower() == dependency.source_name.lower() + ) + + # The dependency specifies a source: this package matches if and only if it is + # from that source. + return dependency.is_same_source_as(self) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Package): + return NotImplemented + + return super().__eq__(other) and self._version == other.version + + def __hash__(self) -> int: + return super().__hash__() ^ hash(self._version) + + def __str__(self) -> str: + return f"{self.complete_name} ({self.full_pretty_version})" + + def __repr__(self) -> str: + args = [repr(self._name), repr(self._version.text)] + + if self._features: + args.append(f"features={repr(self._features)}") + + if self._source_type: + args.append(f"source_type={repr(self._source_type)}") + args.append(f"source_url={repr(self._source_url)}") + + if self._source_reference: + args.append(f"source_reference={repr(self._source_reference)}") + + if self._source_resolved_reference: + args.append( + f"source_resolved_reference={repr(self._source_resolved_reference)}" + ) + if self._source_subdirectory: + args.append(f"source_subdirectory={repr(self._source_subdirectory)}") + + args_str = ", ".join(args) + return f"Package({args_str})" diff --git a/src/poetry/core/packages/project_package.py b/src/poetry/core/packages/project_package.py new file mode 100644 index 0000000..3af4eb1 --- /dev/null +++ b/src/poetry/core/packages/project_package.py @@ -0,0 +1,90 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any + +from poetry.core.constraints.version import parse_constraint +from poetry.core.version.markers import parse_marker + + +if TYPE_CHECKING: + from poetry.core.packages.dependency import Dependency + from poetry.core.constraints.version import Version + +from poetry.core.packages.package import Package +from poetry.core.packages.utils.utils import create_nested_marker + + +class ProjectPackage(Package): + def __init__( + self, + name: str, + version: str | Version, + pretty_version: str | None = None, + ) -> None: + super().__init__(name, version, pretty_version) + + self.build_config: dict[str, Any] = {} + self.packages: list[dict[str, Any]] = [] + self.include: list[dict[str, Any]] = [] + self.exclude: list[dict[str, Any]] = [] + self.custom_urls: dict[str, str] = {} + + if self._python_versions == "*": + self._python_constraint = parse_constraint("~2.7 || >=3.4") + + @property + def build_script(self) -> str | None: + return self.build_config.get("script") + + def is_root(self) -> bool: + return True + + def to_dependency(self) -> Dependency: + dependency = super().to_dependency() + + dependency.is_root = True + + return dependency + + @property + def python_versions(self) -> str: + return self._python_versions + + @python_versions.setter + def python_versions(self, value: str) -> None: + self._python_versions = value + + if value == "*": + value = "~2.7 || >=3.4" + + self._python_constraint = parse_constraint(value) + self._python_marker = parse_marker( + create_nested_marker("python_version", self._python_constraint) + ) + + @property + def version(self) 
-> Version: + # override version to make it settable + return super().version + + @version.setter + def version(self, value: str | Version) -> None: + self._set_version(value) + + @property + def urls(self) -> dict[str, str]: + urls = super().urls + + urls.update(self.custom_urls) + + return urls + + def __hash__(self) -> int: + # The parent Package class's __hash__ incorporates the version because + # a Package's version is immutable. But a ProjectPackage's version is + # mutable. So call Package's parent hash function. + return super(Package, self).__hash__() + + def build_should_generate_setup(self) -> bool: + return self.build_config.get("generate-setup-file", True) diff --git a/src/poetry/core/packages/specification.py b/src/poetry/core/packages/specification.py new file mode 100644 index 0000000..b2dd9b0 --- /dev/null +++ b/src/poetry/core/packages/specification.py @@ -0,0 +1,202 @@ +from __future__ import annotations + +import copy + +from typing import TYPE_CHECKING +from typing import Iterable +from typing import TypeVar + +from packaging.utils import canonicalize_name + + +if TYPE_CHECKING: + from packaging.utils import NormalizedName + + T = TypeVar("T", bound="PackageSpecification") + + +class PackageSpecification: + def __init__( + self, + name: str, + source_type: str | None = None, + source_url: str | None = None, + source_reference: str | None = None, + source_resolved_reference: str | None = None, + source_subdirectory: str | None = None, + features: Iterable[str] | None = None, + ) -> None: + from packaging.utils import canonicalize_name + + self._pretty_name = name + self._name = canonicalize_name(name) + self._source_type = source_type + self._source_url = source_url + self._source_reference = source_reference + self._source_resolved_reference = source_resolved_reference + self._source_subdirectory = source_subdirectory + + if not features: + features = [] + + self._features = frozenset(canonicalize_name(feature) for feature in features) + + @property + def name(self) -> NormalizedName: + return self._name + + @property + def pretty_name(self) -> str: + return self._pretty_name + + @property + def complete_name(self) -> str: + name: str = self._name + + if self._features: + features = ",".join(sorted(self._features)) + name = f"{name}[{features}]" + + return name + + @property + def source_type(self) -> str | None: + return self._source_type + + @property + def source_url(self) -> str | None: + return self._source_url + + @property + def source_reference(self) -> str | None: + return self._source_reference + + @property + def source_resolved_reference(self) -> str | None: + return self._source_resolved_reference + + @property + def source_subdirectory(self) -> str | None: + return self._source_subdirectory + + @property + def features(self) -> frozenset[NormalizedName]: + return self._features + + def is_direct_origin(self) -> bool: + return self._source_type in [ + "directory", + "file", + "url", + "git", + ] + + def provides(self, other: PackageSpecification) -> bool: + """ + Helper method to determine if this package provides the given specification. + + This determination is made to be true, if the names are the same and this + package provides all features required by the other specification. + + Source type checks are explicitly ignored here as this is not of interest. 
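+
+        For example (illustrative), a specification for `foo[bar,baz]` provides
+        both `foo[bar]` and plain `foo`, while plain `foo` does not provide
+        `foo[bar]`.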
+ """ + return self.name == other.name and self.features.issuperset(other.features) + + def is_same_source_as(self, other: PackageSpecification) -> bool: + if self._source_type != other.source_type: + return False + + if not self._source_type: + # both packages are of source type None + # no need to check further + return True + + if ( + self._source_url or other.source_url + ) and self._source_url != other.source_url: + return False + + if ( + self._source_subdirectory or other.source_subdirectory + ) and self._source_subdirectory != other.source_subdirectory: + return False + + # We check the resolved reference first: + # if they match we assume equality regardless + # of their source reference. + # This is important when comparing a resolved branch VCS + # dependency to a direct commit reference VCS dependency + if ( + self._source_resolved_reference + and other.source_resolved_reference + and self._source_resolved_reference == other.source_resolved_reference + ): + return True + + if self._source_reference or other.source_reference: + # special handling for packages with references + if not self._source_reference or not other.source_reference: + # case: one reference is defined and is non-empty, but other is not + return False + + if not ( + self._source_reference == other.source_reference + or self._source_reference.startswith(other.source_reference) + or other.source_reference.startswith(self._source_reference) + ): + # case: both references defined, but one is not equal to or a short + # representation of the other + return False + + if ( + self._source_resolved_reference + and other.source_resolved_reference + and self._source_resolved_reference != other.source_resolved_reference + ): + return False + + return True + + def is_same_package_as(self, other: PackageSpecification) -> bool: + if other.complete_name != self.complete_name: + return False + + return self.is_same_source_as(other) + + def clone(self: T) -> T: + return copy.deepcopy(self) + + def with_features(self: T, features: Iterable[str]) -> T: + package = self.clone() + + package._features = frozenset( + canonicalize_name(feature) for feature in features + ) + + return package + + def without_features(self: T) -> T: + return self.with_features([]) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, PackageSpecification): + return NotImplemented + return self.is_same_package_as(other) + + def __hash__(self) -> int: + result = hash(self.complete_name) # complete_name includes features + + if self._source_type: + # Don't include _source_reference and _source_resolved_reference in hash + # because two specs can be equal even if these attributes are not equal. + # (They must still meet certain conditions. See is_same_source_as().) 
+ result ^= ( + hash(self._source_type) + ^ hash(self._source_url) + ^ hash(self._source_subdirectory) + ) + + return result + + def __str__(self) -> str: + raise NotImplementedError() diff --git a/src/poetry/core/packages/url_dependency.py b/src/poetry/core/packages/url_dependency.py new file mode 100644 index 0000000..610eb31 --- /dev/null +++ b/src/poetry/core/packages/url_dependency.py @@ -0,0 +1,63 @@ +from __future__ import annotations + +from typing import Iterable +from urllib.parse import urlparse + +from poetry.core.packages.dependency import Dependency + + +class URLDependency(Dependency): + def __init__( + self, + name: str, + url: str, + *, + directory: str | None = None, + groups: Iterable[str] | None = None, + optional: bool = False, + extras: Iterable[str] | None = None, + ) -> None: + self._url = url + self._directory = directory + + parsed = urlparse(url) + if not parsed.scheme or not parsed.netloc: + raise ValueError(f"{url} does not seem like a valid url") + + super().__init__( + name, + "*", + groups=groups, + optional=optional, + allows_prereleases=True, + source_type="url", + source_url=self._url, + source_subdirectory=directory, + extras=extras, + ) + + @property + def url(self) -> str: + return self._url + + @property + def directory(self) -> str | None: + return self._directory + + @property + def base_pep_508_name(self) -> str: + requirement = self.pretty_name + + if self.extras: + extras = ",".join(sorted(self.extras)) + requirement += f"[{extras}]" + + requirement += f" @ {self._url}" + + if self.directory: + requirement += f"#subdirectory={self.directory}" + + return requirement + + def is_url(self) -> bool: + return True diff --git a/src/poetry/core/packages/utils/__init__.py b/src/poetry/core/packages/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/packages/utils/link.py b/src/poetry/core/packages/utils/link.py new file mode 100644 index 0000000..c6d8277 --- /dev/null +++ b/src/poetry/core/packages/utils/link.py @@ -0,0 +1,232 @@ +from __future__ import annotations + +import posixpath +import re +import urllib.parse as urlparse + +from poetry.core.packages.utils.utils import path_to_url +from poetry.core.packages.utils.utils import splitext + + +class Link: + def __init__( + self, + url: str, + requires_python: str | None = None, + metadata: str | bool | None = None, + yanked: str | bool = False, + ) -> None: + """ + Object representing a parsed link from https://pypi.python.org/simple/* + + url: + url of the resource pointed to (href of the link) + requires_python: + String containing the `Requires-Python` metadata field, specified + in PEP 345. This may be specified by a data-requires-python + attribute in the HTML link tag, as described in PEP 503. + metadata: + String of the syntax `=` representing the hash + of the Core Metadata file. This may be specified by a + data-dist-info-metadata attribute in the HTML link tag, as described + in PEP 658. + yanked: + False, if the data-yanked attribute is not present. + A string, if the data-yanked attribute has a string value. + True, if the data-yanked attribute is present but has no value. + According to PEP 592. 
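+
+        For example (illustrative)::
+
+            Link(
+                "https://files.example.com/foo-1.0-py3-none-any.whl",
+                requires_python=">=3.7",
+                yanked="renamed to foo-ng",
+            )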
+ """ + + # url can be a UNC windows share + if url.startswith("\\\\"): + url = path_to_url(url) + + self.url = url + self.requires_python = requires_python if requires_python else None + + if isinstance(metadata, str): + metadata = {"true": True, "": False, "false": False}.get( + metadata.strip().lower(), metadata + ) + + self._metadata = metadata + self._yanked = yanked + + def __str__(self) -> str: + if self.requires_python: + rp = f" (requires-python:{self.requires_python})" + else: + rp = "" + + return f"{self.url}{rp}" + + def __repr__(self) -> str: + return f"" + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Link): + return NotImplemented + return self.url == other.url + + def __ne__(self, other: object) -> bool: + if not isinstance(other, Link): + return NotImplemented + return self.url != other.url + + def __lt__(self, other: object) -> bool: + if not isinstance(other, Link): + return NotImplemented + return self.url < other.url + + def __le__(self, other: object) -> bool: + if not isinstance(other, Link): + return NotImplemented + return self.url <= other.url + + def __gt__(self, other: object) -> bool: + if not isinstance(other, Link): + return NotImplemented + return self.url > other.url + + def __ge__(self, other: object) -> bool: + if not isinstance(other, Link): + return NotImplemented + return self.url >= other.url + + def __hash__(self) -> int: + return hash(self.url) + + @property + def filename(self) -> str: + _, netloc, path, _, _ = urlparse.urlsplit(self.url) + name = posixpath.basename(path.rstrip("/")) or netloc + name = urlparse.unquote(name) + + return name + + @property + def scheme(self) -> str: + return urlparse.urlsplit(self.url)[0] + + @property + def netloc(self) -> str: + return urlparse.urlsplit(self.url)[1] + + @property + def path(self) -> str: + return urlparse.unquote(urlparse.urlsplit(self.url)[2]) + + def splitext(self) -> tuple[str, str]: + return splitext(posixpath.basename(self.path.rstrip("/"))) + + @property + def ext(self) -> str: + return self.splitext()[1] + + @property + def url_without_fragment(self) -> str: + scheme, netloc, path, query, fragment = urlparse.urlsplit(self.url) + return urlparse.urlunsplit((scheme, netloc, path, query, None)) + + _egg_fragment_re = re.compile(r"[#&]egg=([^&]*)") + + @property + def egg_fragment(self) -> str | None: + match = self._egg_fragment_re.search(self.url) + if not match: + return None + return match.group(1) + + _subdirectory_fragment_re = re.compile(r"[#&]subdirectory=([^&]*)") + + @property + def subdirectory_fragment(self) -> str | None: + match = self._subdirectory_fragment_re.search(self.url) + if not match: + return None + return match.group(1) + + _hash_re = re.compile(r"(sha1|sha224|sha384|sha256|sha512|md5)=([a-f0-9]+)") + + @property + def has_metadata(self) -> bool: + if self._metadata is None: + return False + return bool(self._metadata) and (self.is_wheel or self.is_sdist) + + @property + def metadata_url(self) -> str | None: + if self.has_metadata: + return f"{self.url_without_fragment.split('?', 1)[0]}.metadata" + return None + + @property + def metadata_hash(self) -> str | None: + if self.has_metadata and isinstance(self._metadata, str): + match = self._hash_re.search(self._metadata) + if match: + return match.group(2) + return None + + @property + def metadata_hash_name(self) -> str | None: + if self.has_metadata and isinstance(self._metadata, str): + match = self._hash_re.search(self._metadata) + if match: + return match.group(1) + return None + + @property + 
def hash(self) -> str | None: + match = self._hash_re.search(self.url) + if match: + return match.group(2) + return None + + @property + def hash_name(self) -> str | None: + match = self._hash_re.search(self.url) + if match: + return match.group(1) + return None + + @property + def show_url(self) -> str: + return posixpath.basename(self.url.split("#", 1)[0].split("?", 1)[0]) + + @property + def is_wheel(self) -> bool: + return self.ext == ".whl" + + @property + def is_wininst(self) -> bool: + return self.ext == ".exe" + + @property + def is_egg(self) -> bool: + return self.ext == ".egg" + + @property + def is_sdist(self) -> bool: + return self.ext in {".tar.bz2", ".tar.gz", ".zip"} + + @property + def is_artifact(self) -> bool: + """ + Determines if this points to an actual artifact (e.g. a tarball) or if + it points to an "abstract" thing like a path or a VCS location. + """ + if self.scheme in ["ssh", "git", "hg", "bzr", "sftp", "svn"]: + return False + + return True + + @property + def yanked(self) -> bool: + return isinstance(self._yanked, str) or bool(self._yanked) + + @property + def yanked_reason(self) -> str: + if isinstance(self._yanked, str): + return self._yanked + return "" diff --git a/src/poetry/core/packages/utils/utils.py b/src/poetry/core/packages/utils/utils.py new file mode 100644 index 0000000..4b4853c --- /dev/null +++ b/src/poetry/core/packages/utils/utils.py @@ -0,0 +1,391 @@ +from __future__ import annotations + +import functools +import posixpath +import re +import sys + +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Dict +from typing import List +from typing import Tuple +from urllib.parse import unquote +from urllib.parse import urlsplit +from urllib.request import url2pathname + +from poetry.core.constraints.version import Version +from poetry.core.constraints.version import VersionRange +from poetry.core.constraints.version import parse_constraint +from poetry.core.pyproject.toml import PyProjectTOML +from poetry.core.version.markers import dnf + + +if TYPE_CHECKING: + from poetry.core.constraints.generic import BaseConstraint + from poetry.core.constraints.version import VersionConstraint + from poetry.core.version.markers import BaseMarker + + # Even though we've `from __future__ import annotations`, mypy doesn't seem to like + # this as `dict[str, ...]` + ConvertedMarkers = Dict[str, List[List[Tuple[str, str]]]] + + +BZ2_EXTENSIONS = (".tar.bz2", ".tbz") +XZ_EXTENSIONS = (".tar.xz", ".txz", ".tlz", ".tar.lz", ".tar.lzma") +ZIP_EXTENSIONS = (".zip", ".whl") +TAR_EXTENSIONS = (".tar.gz", ".tgz", ".tar") +ARCHIVE_EXTENSIONS = ZIP_EXTENSIONS + BZ2_EXTENSIONS + TAR_EXTENSIONS + XZ_EXTENSIONS +SUPPORTED_EXTENSIONS: tuple[str, ...] = ZIP_EXTENSIONS + TAR_EXTENSIONS + +try: + import bz2 # noqa: F401 + + SUPPORTED_EXTENSIONS += BZ2_EXTENSIONS +except ImportError: + pass + +try: + # Only for Python 3.3+ + import lzma # noqa: F401 + + SUPPORTED_EXTENSIONS += XZ_EXTENSIONS +except ImportError: + pass + + +def path_to_url(path: str | Path) -> str: + """ + Convert a path to a file: URL. The path will be made absolute unless otherwise + specified and have quoted path parts. + """ + return Path(path).absolute().as_uri() + + +def url_to_path(url: str) -> Path: + """ + Convert an RFC8089 file URI to path. 
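+
+    For example (illustrative, POSIX): `file:///tmp/archive.tar.gz` becomes
+    `Path("/tmp/archive.tar.gz")`; on Windows drive letters and separators differ.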
+ + The logic used here is borrowed from pip + https://github.com/pypa/pip/blob/4d1932fcdd1974c820ea60b3286984ebb0c3beaa/src/pip/_internal/utils/urls.py#L31 + """ + if not url.startswith("file:"): + raise ValueError(f"{url} is not a valid file URI") + + _, netloc, path, _, _ = urlsplit(url) + + if not netloc or netloc == "localhost": + # According to RFC 8089, same as empty authority. + netloc = "" + elif netloc not in {".", ".."} and sys.platform == "win32": + # If we have a UNC path, prepend UNC share notation. + netloc = "\\\\" + netloc + else: + raise ValueError( + f"non-local file URIs are not supported on this platform: {url}" + ) + + return Path(url2pathname(netloc + unquote(path))) + + +def is_url(name: str) -> bool: + if ":" not in name: + return False + scheme = name.split(":", 1)[0].lower() + + return scheme in [ + "http", + "https", + "file", + "ftp", + "ssh", + "git", + "hg", + "bzr", + "sftp", + "svn", + "ssh", + ] + + +def strip_extras(path: str) -> tuple[str, str | None]: + m = re.match(r"^(.+)(\[[^\]]+\])$", path) + extras = None + if m: + path_no_extras = m.group(1) + extras = m.group(2) + else: + path_no_extras = path + + return path_no_extras, extras + + +@functools.lru_cache(maxsize=None) +def is_python_project(path: Path) -> bool: + """Return true if the directory is a Python project""" + if not path.is_dir(): + return False + + setup_py = path / "setup.py" + setup_cfg = path / "setup.cfg" + setuptools_project = setup_py.exists() or setup_cfg.exists() + + pyproject = PyProjectTOML(path / "pyproject.toml") + + supports_pep517 = setuptools_project or pyproject.is_build_system_defined() + supports_poetry = pyproject.is_poetry_project() + + return supports_pep517 or supports_poetry + + +def is_archive_file(name: str) -> bool: + """Return True if `name` is a considered as an archive file.""" + ext = splitext(name)[1].lower() + if ext in ARCHIVE_EXTENSIONS: + return True + return False + + +def splitext(path: str) -> tuple[str, str]: + """Like os.path.splitext, but take off .tar too""" + base, ext = posixpath.splitext(path) + if base.lower().endswith(".tar"): + ext = base[-4:] + ext + base = base[:-4] + return base, ext + + +def convert_markers(marker: BaseMarker) -> ConvertedMarkers: + from poetry.core.version.markers import MarkerUnion + from poetry.core.version.markers import MultiMarker + from poetry.core.version.markers import SingleMarker + + requirements: ConvertedMarkers = {} + marker = dnf(marker) + conjunctions = marker.markers if isinstance(marker, MarkerUnion) else [marker] + group_count = len(conjunctions) + + def add_constraint( + marker_name: str, constraint: tuple[str, str], group_index: int + ) -> None: + # python_full_version is equivalent to python_version + # for Poetry so we merge them + if marker_name == "python_full_version": + marker_name = "python_version" + if marker_name not in requirements: + requirements[marker_name] = [[] for _ in range(group_count)] + requirements[marker_name][group_index].append(constraint) + + for i, sub_marker in enumerate(conjunctions): + if isinstance(sub_marker, MultiMarker): + for m in sub_marker.markers: + if isinstance(m, SingleMarker): + add_constraint(m.name, (m.operator, m.value), i) + elif isinstance(sub_marker, SingleMarker): + add_constraint(sub_marker.name, (sub_marker.operator, sub_marker.value), i) + + for group_name in requirements: + # remove duplicates + seen = [] + for r in requirements[group_name]: + if r not in seen: + seen.append(r) + requirements[group_name] = seen + + return requirements + + +def 
contains_group_without_marker(markers: ConvertedMarkers, marker_name: str) -> bool: + return marker_name not in markers or [] in markers[marker_name] + + +def create_nested_marker( + name: str, + constraint: BaseConstraint | VersionConstraint, +) -> str: + from poetry.core.constraints.generic import Constraint + from poetry.core.constraints.generic import MultiConstraint + from poetry.core.constraints.generic import UnionConstraint + from poetry.core.constraints.version import VersionUnion + + if constraint.is_any(): + return "" + + if isinstance(constraint, (MultiConstraint, UnionConstraint)): + multi_parts = [] + for c in constraint.constraints: + multi = isinstance(c, (MultiConstraint, UnionConstraint)) + multi_parts.append((multi, create_nested_marker(name, c))) + + glue = " and " + if isinstance(constraint, UnionConstraint): + parts = [f"({part[1]})" if part[0] else part[1] for part in multi_parts] + glue = " or " + else: + parts = [part[1] for part in multi_parts] + + marker = glue.join(parts) + elif isinstance(constraint, Constraint): + marker = f'{name} {constraint.operator} "{constraint.version}"' + elif isinstance(constraint, VersionUnion): + parts = [create_nested_marker(name, c) for c in constraint.ranges] + glue = " or " + parts = [f"({part})" for part in parts] + marker = glue.join(parts) + elif isinstance(constraint, Version): + if name == "python_version" and constraint.precision >= 3: + name = "python_full_version" + + marker = f'{name} == "{constraint.text}"' + else: + assert isinstance(constraint, VersionRange) + min_name = max_name = name + + parts = [] + + # `python_version` is a special case: to keep the constructed marker equivalent + # to the constraint we need to be careful with the precision. + # + # PEP 440 tells us that when we come to make the comparison the release + # segment will be zero padded: eg "<= 3.10" is equivalent to "<= 3.10.0". + # + # But "python_version <= 3.10" is _not_ equivalent to "python_version <= 3.10.0" + # - see normalize_python_version_markers. + # + # A similar issue arises for a constraint like "> 3.6". 
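+        # For example (illustrative): the constraint ">3.6,<=3.10" is rendered as
+        # 'python_full_version > "3.6.0" and python_full_version <= "3.10.0"',
+        # whereas ">=3.6,<3.10" can safely stay on python_version.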
+ if constraint.min is not None: + op = ">=" if constraint.include_min else ">" + version = constraint.min + if min_name == "python_version" and version.precision >= 3: + min_name = "python_full_version" + + if ( + min_name == "python_version" + and not constraint.include_min + and version.precision < 3 + ): + padding = ".0" * (3 - version.precision) + part = f'python_full_version > "{version}{padding}"' + else: + part = f'{min_name} {op} "{version}"' + + parts.append(part) + + if constraint.max is not None: + op = "<=" if constraint.include_max else "<" + version = constraint.max + if max_name == "python_version" and version.precision >= 3: + max_name = "python_full_version" + + if ( + max_name == "python_version" + and constraint.include_max + and version.precision < 3 + ): + padding = ".0" * (3 - version.precision) + part = f'python_full_version <= "{version}{padding}"' + else: + part = f'{max_name} {op} "{version}"' + + parts.append(part) + + marker = " and ".join(parts) + + return marker + + +def get_python_constraint_from_marker( + marker: BaseMarker, +) -> VersionConstraint: + from poetry.core.constraints.version import EmptyConstraint + from poetry.core.constraints.version import VersionRange + + python_marker = marker.only("python_version", "python_full_version") + if python_marker.is_any(): + return VersionRange() + + if python_marker.is_empty(): + return EmptyConstraint() + + markers = convert_markers(marker) + if contains_group_without_marker(markers, "python_version"): + # groups are in disjunctive normal form (DNF), + # an empty group means that python_version does not appear in this group, + # which means that python_version is arbitrary for this group + return VersionRange() + + python_version_markers = markers["python_version"] + normalized = normalize_python_version_markers(python_version_markers) + constraint = parse_constraint(normalized) + return constraint + + +def normalize_python_version_markers( # NOSONAR + disjunction: list[list[tuple[str, str]]], +) -> str: + ors = [] + for or_ in disjunction: + ands = [] + for op, version in or_: + # Expand python version + if op == "==" and "*" not in version and version.count(".") < 2: + version = "~" + version + op = "" + + elif op == "!=" and "*" not in version and version.count(".") < 2: + version += ".*" + + elif op in ("<=", ">"): + # Make adjustments on encountering versions with less than full + # precision. + # + # Per PEP-508: + # python_version <-> '.'.join(platform.python_version_tuple()[:2]) + # + # So for two digits of precision we make the following adjustments: + # - `python_version > "x.y"` requires version >= x.(y+1).anything + # - `python_version <= "x.y"` requires version < x.(y+1).anything + # + # Treatment when we see a single digit of precision is less clear: is + # that even a legitimate marker? + # + # Experiment suggests that pip behaviour is essentially to make a + # lexicographical comparison, for example `python_version > "3"` is + # satisfied by version 3.anything, whereas `python_version <= "3"` is + # satisfied only by version 2.anything. + # + # We achieve the above by fiddling with the operator and version in the + # marker. 
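+                # For example (illustrative): ('>', '3.7') becomes ">=3.8" and
+                # ('<=', '3.7') becomes "<3.8"; a single-digit version keeps its
+                # text, so ('>', '3') becomes ">=3" and ('<=', '3') becomes "<3".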
+ parsed_version = Version.parse(version) + if parsed_version.precision < 3: + if op == "<=": + op = "<" + elif op == ">": + op = ">=" + + if parsed_version.precision == 2: + version = parsed_version.next_minor().text + + elif op in ("in", "not in"): + versions = [] + for v in re.split("[ ,]+", version): + split = v.split(".") + if len(split) in [1, 2]: + split.append("*") + op_ = "" if op == "in" else "!=" + else: + op_ = "==" if op == "in" else "!=" + + versions.append(op_ + ".".join(split)) + + if versions: + glue = " || " if op == "in" else ", " + ands.append(glue.join(versions)) + + continue + + ands.append(f"{op}{version}") + + ors.append(" ".join(ands)) + + return " || ".join(ors) diff --git a/src/poetry/core/packages/vcs_dependency.py b/src/poetry/core/packages/vcs_dependency.py new file mode 100644 index 0000000..f79e8b7 --- /dev/null +++ b/src/poetry/core/packages/vcs_dependency.py @@ -0,0 +1,125 @@ +from __future__ import annotations + +from typing import Iterable + +from poetry.core.packages.dependency import Dependency + + +class VCSDependency(Dependency): + """ + Represents a VCS dependency + """ + + def __init__( + self, + name: str, + vcs: str, + source: str, + branch: str | None = None, + tag: str | None = None, + rev: str | None = None, + resolved_rev: str | None = None, + directory: str | None = None, + groups: Iterable[str] | None = None, + optional: bool = False, + develop: bool = False, + extras: Iterable[str] | None = None, + ) -> None: + self._vcs = vcs + self._source = source + + self._branch = branch + self._tag = tag + self._rev = rev + self._directory = directory + self._develop = develop + + super().__init__( + name, + "*", + groups=groups, + optional=optional, + allows_prereleases=True, + source_type=self._vcs.lower(), + source_url=self._source, + source_reference=branch or tag or rev or "HEAD", + source_resolved_reference=resolved_rev, + source_subdirectory=directory, + extras=extras, + ) + + @property + def vcs(self) -> str: + return self._vcs + + @property + def source(self) -> str: + return self._source + + @property + def branch(self) -> str | None: + return self._branch + + @property + def tag(self) -> str | None: + return self._tag + + @property + def rev(self) -> str | None: + return self._rev + + @property + def directory(self) -> str | None: + return self._directory + + @property + def develop(self) -> bool: + return self._develop + + @property + def reference(self) -> str: + reference = self._branch or self._tag or self._rev or "" + return reference + + @property + def pretty_constraint(self) -> str: + if self._branch: + what = "branch" + version = self._branch + elif self._tag: + what = "tag" + version = self._tag + elif self._rev: + what = "rev" + version = self._rev + else: + return "" + + return f"{what} {version}" + + @property + def base_pep_508_name(self) -> str: + from poetry.core.vcs import git + + requirement = self.pretty_name + parsed_url = git.ParsedUrl.parse(self._source) + + if self.extras: + extras = ",".join(sorted(self.extras)) + requirement += f"[{extras}]" + + if parsed_url.protocol is not None: + requirement += f" @ {self._vcs}+{self._source}" + else: + requirement += f" @ {self._vcs}+ssh://{parsed_url.format()}" + + if self.reference: + requirement += f"@{self.reference}" + + if self._directory: + requirement += f"#subdirectory={self._directory}" + + return requirement + + def is_vcs(self) -> bool: + return True diff --git a/src/poetry/core/poetry.py b/src/poetry/core/poetry.py new file mode 100644 index 0000000..fbcd464 --- 
/dev/null +++ b/src/poetry/core/poetry.py @@ -0,0 +1,45 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any + + +if TYPE_CHECKING: + from pathlib import Path + + from poetry.core.packages.project_package import ProjectPackage + from poetry.core.pyproject.toml import PyProjectTOML + from poetry.core.toml import TOMLFile + + +class Poetry: + def __init__( + self, + file: Path, + local_config: dict[str, Any], + package: ProjectPackage, + ) -> None: + from poetry.core.pyproject.toml import PyProjectTOML + + self._pyproject = PyProjectTOML(file) + self._package = package + self._local_config = local_config + + @property + def pyproject(self) -> PyProjectTOML: + return self._pyproject + + @property + def file(self) -> TOMLFile: + return self._pyproject.file + + @property + def package(self) -> ProjectPackage: + return self._package + + @property + def local_config(self) -> dict[str, Any]: + return self._local_config + + def get_project_config(self, config: str, default: Any = None) -> Any: + return self._local_config.get("config", {}).get(config, default) diff --git a/src/poetry/core/py.typed b/src/poetry/core/py.typed new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/pyproject/__init__.py b/src/poetry/core/pyproject/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/pyproject/exceptions.py b/src/poetry/core/pyproject/exceptions.py new file mode 100644 index 0000000..ca01aaf --- /dev/null +++ b/src/poetry/core/pyproject/exceptions.py @@ -0,0 +1,7 @@ +from __future__ import annotations + +from poetry.core.exceptions import PoetryCoreException + + +class PyProjectException(PoetryCoreException): + pass diff --git a/src/poetry/core/pyproject/tables.py b/src/poetry/core/pyproject/tables.py new file mode 100644 index 0000000..99a4c83 --- /dev/null +++ b/src/poetry/core/pyproject/tables.py @@ -0,0 +1,55 @@ +from __future__ import annotations + +from contextlib import suppress +from pathlib import Path +from typing import TYPE_CHECKING + + +if TYPE_CHECKING: + from poetry.core.packages.dependency import Dependency + + +# TODO: Convert to dataclass once python 2.7, 3.5 is dropped +class BuildSystem: + def __init__( + self, build_backend: str | None = None, requires: list[str] | None = None + ) -> None: + self.build_backend = ( + build_backend + if build_backend is not None + else "setuptools.build_meta:__legacy__" + ) + self.requires = requires if requires is not None else ["setuptools", "wheel"] + self._dependencies: list[Dependency] | None = None + + @property + def dependencies(self) -> list[Dependency]: + if self._dependencies is None: + # avoid circular dependency when loading DirectoryDependency + from poetry.core.packages.dependency import Dependency + from poetry.core.packages.directory_dependency import DirectoryDependency + from poetry.core.packages.file_dependency import FileDependency + + self._dependencies = [] + for requirement in self.requires: + dependency = None + try: + dependency = Dependency.create_from_pep_508(requirement) + except ValueError: + # PEP 517 requires can be path if not PEP 508 + path = Path(requirement) + # compatibility Python < 3.8 + # https://docs.python.org/3/library/pathlib.html#methods + with suppress(OSError): + if path.is_file(): + dependency = FileDependency(name=path.name, path=path) + elif path.is_dir(): + dependency = DirectoryDependency(name=path.name, path=path) + + if dependency is None: + # skip since we could not determine requirement + continue + + 
self._dependencies.append(dependency) + + return self._dependencies diff --git a/src/poetry/core/pyproject/toml.py b/src/poetry/core/pyproject/toml.py new file mode 100644 index 0000000..a9be9e3 --- /dev/null +++ b/src/poetry/core/pyproject/toml.py @@ -0,0 +1,114 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any + +from tomlkit.api import table + + +if TYPE_CHECKING: + from pathlib import Path + + from tomlkit.toml_document import TOMLDocument + + from poetry.core.pyproject.tables import BuildSystem + from poetry.core.toml import TOMLFile + + +class PyProjectTOML: + def __init__(self, path: str | Path) -> None: + from poetry.core.toml import TOMLFile + + self._file = TOMLFile(path=path) + self._data: TOMLDocument | None = None + self._build_system: BuildSystem | None = None + + @property + def file(self) -> TOMLFile: + return self._file + + @property + def data(self) -> TOMLDocument: + from tomlkit.toml_document import TOMLDocument + + if self._data is None: + if not self._file.exists(): + self._data = TOMLDocument() + else: + self._data = self._file.read() + + return self._data + + def is_build_system_defined(self) -> bool: + return self._file.exists() and "build-system" in self.data + + @property + def build_system(self) -> BuildSystem: + from poetry.core.pyproject.tables import BuildSystem + + if self._build_system is None: + build_backend = None + requires = None + + if not self._file.exists(): + build_backend = "poetry.core.masonry.api" + requires = ["poetry-core"] + + container = self.data.get("build-system", {}) + self._build_system = BuildSystem( + build_backend=container.get("build-backend", build_backend), + requires=container.get("requires", requires), + ) + + return self._build_system + + @property + def poetry_config(self) -> dict[str, Any]: + from tomlkit.exceptions import NonExistentKey + + try: + tool = self.data["tool"] + assert isinstance(tool, dict) + config = tool["poetry"] + assert isinstance(config, dict) + return config + except NonExistentKey as e: + from poetry.core.pyproject.exceptions import PyProjectException + + raise PyProjectException( + f"[tool.poetry] section not found in {self._file}" + ) from e + + def is_poetry_project(self) -> bool: + from poetry.core.pyproject.exceptions import PyProjectException + + if self.file.exists(): + try: + _ = self.poetry_config + return True + except PyProjectException: + pass + + return False + + def __getattr__(self, item: str) -> Any: + return getattr(self.data, item) + + def save(self) -> None: + data = self.data + + if self._build_system is not None: + if "build-system" not in data: + data["build-system"] = table() + + build_system = data["build-system"] + assert isinstance(build_system, dict) + + build_system["requires"] = self._build_system.requires + build_system["build-backend"] = self._build_system.build_backend + + self.file.write(data=data) + + def reload(self) -> None: + self._data = None + self._build_system = None diff --git a/src/poetry/core/semver/__init__.py b/src/poetry/core/semver/__init__.py new file mode 100644 index 0000000..be39559 --- /dev/null +++ b/src/poetry/core/semver/__init__.py @@ -0,0 +1,10 @@ +from __future__ import annotations + +import warnings + + +warnings.warn( + "poetry.core.semver is deprecated. 
Use poetry.core.constraints.version instead.", + DeprecationWarning, + stacklevel=2, +) diff --git a/src/poetry/core/semver/empty_constraint.py b/src/poetry/core/semver/empty_constraint.py new file mode 100644 index 0000000..8559866 --- /dev/null +++ b/src/poetry/core/semver/empty_constraint.py @@ -0,0 +1,6 @@ +from __future__ import annotations + +from poetry.core.constraints.version import EmptyConstraint + + +__all__ = ["EmptyConstraint"] diff --git a/src/poetry/core/semver/exceptions.py b/src/poetry/core/semver/exceptions.py new file mode 100644 index 0000000..7f7bfae --- /dev/null +++ b/src/poetry/core/semver/exceptions.py @@ -0,0 +1,6 @@ +from __future__ import annotations + +from poetry.core.constraints.version.exceptions import ParseConstraintError + + +__all__ = ["ParseConstraintError"] diff --git a/src/poetry/core/semver/helpers.py b/src/poetry/core/semver/helpers.py new file mode 100644 index 0000000..065d366 --- /dev/null +++ b/src/poetry/core/semver/helpers.py @@ -0,0 +1,7 @@ +from __future__ import annotations + +from poetry.core.constraints.version.parser import parse_constraint +from poetry.core.constraints.version.parser import parse_single_constraint + + +__all__ = ["parse_constraint", "parse_single_constraint"] diff --git a/src/poetry/core/semver/patterns.py b/src/poetry/core/semver/patterns.py new file mode 100644 index 0000000..b2d8658 --- /dev/null +++ b/src/poetry/core/semver/patterns.py @@ -0,0 +1,18 @@ +from __future__ import annotations + +from poetry.core.constraints.version.patterns import BASIC_CONSTRAINT +from poetry.core.constraints.version.patterns import CARET_CONSTRAINT +from poetry.core.constraints.version.patterns import COMPLETE_VERSION +from poetry.core.constraints.version.patterns import TILDE_CONSTRAINT +from poetry.core.constraints.version.patterns import TILDE_PEP440_CONSTRAINT +from poetry.core.constraints.version.patterns import X_CONSTRAINT + + +__all__ = [ + "COMPLETE_VERSION", + "CARET_CONSTRAINT", + "TILDE_CONSTRAINT", + "TILDE_PEP440_CONSTRAINT", + "X_CONSTRAINT", + "BASIC_CONSTRAINT", +] diff --git a/src/poetry/core/semver/util.py b/src/poetry/core/semver/util.py new file mode 100644 index 0000000..eabcc06 --- /dev/null +++ b/src/poetry/core/semver/util.py @@ -0,0 +1,6 @@ +from __future__ import annotations + +from poetry.core.constraints.version import constraint_regions + + +__all__ = ["constraint_regions"] diff --git a/src/poetry/core/semver/version.py b/src/poetry/core/semver/version.py new file mode 100644 index 0000000..d4ec91a --- /dev/null +++ b/src/poetry/core/semver/version.py @@ -0,0 +1,6 @@ +from __future__ import annotations + +from poetry.core.constraints.version import Version + + +__all__ = ["Version"] diff --git a/src/poetry/core/semver/version_constraint.py b/src/poetry/core/semver/version_constraint.py new file mode 100644 index 0000000..8cc5d94 --- /dev/null +++ b/src/poetry/core/semver/version_constraint.py @@ -0,0 +1,6 @@ +from __future__ import annotations + +from poetry.core.constraints.version import VersionConstraint + + +__all__ = ["VersionConstraint"] diff --git a/src/poetry/core/semver/version_range.py b/src/poetry/core/semver/version_range.py new file mode 100644 index 0000000..76117c8 --- /dev/null +++ b/src/poetry/core/semver/version_range.py @@ -0,0 +1,6 @@ +from __future__ import annotations + +from poetry.core.constraints.version import VersionRange + + +__all__ = ["VersionRange"] diff --git a/src/poetry/core/semver/version_range_constraint.py b/src/poetry/core/semver/version_range_constraint.py new file 
mode 100644 index 0000000..7af781c --- /dev/null +++ b/src/poetry/core/semver/version_range_constraint.py @@ -0,0 +1,6 @@ +from __future__ import annotations + +from poetry.core.constraints.version import VersionRangeConstraint + + +__all__ = ["VersionRangeConstraint"] diff --git a/src/poetry/core/semver/version_union.py b/src/poetry/core/semver/version_union.py new file mode 100644 index 0000000..567c17c --- /dev/null +++ b/src/poetry/core/semver/version_union.py @@ -0,0 +1,6 @@ +from __future__ import annotations + +from poetry.core.constraints.version import VersionUnion + + +__all__ = ["VersionUnion"] diff --git a/src/poetry/core/spdx/__init__.py b/src/poetry/core/spdx/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/spdx/data/licenses.json b/src/poetry/core/spdx/data/licenses.json new file mode 100644 index 0000000..6a241f6 --- /dev/null +++ b/src/poetry/core/spdx/data/licenses.json @@ -0,0 +1,1847 @@ +{ + "0BSD": [ + "BSD Zero Clause License", + false, + false + ], + "AAL": [ + "Attribution Assurance License", + true, + false + ], + "ADSL": [ + "Amazon Digital Services License", + false, + false + ], + "AFL-1.1": [ + "Academic Free License v1.1", + true, + false + ], + "AFL-1.2": [ + "Academic Free License v1.2", + true, + false + ], + "AFL-2.0": [ + "Academic Free License v2.0", + true, + false + ], + "AFL-2.1": [ + "Academic Free License v2.1", + true, + false + ], + "AFL-3.0": [ + "Academic Free License v3.0", + true, + false + ], + "AGPL-1.0": [ + "Affero General Public License v1.0", + false, + false + ], + "AGPL-3.0": [ + "GNU Affero General Public License v3.0", + true, + true + ], + "AGPL-3.0-only": [ + "GNU Affero General Public License v3.0 only", + true, + false + ], + "AGPL-3.0-or-later": [ + "GNU Affero General Public License v3.0 or later", + true, + false + ], + "AMDPLPA": [ + "AMD's plpa_map.c License", + false, + false + ], + "AML": [ + "Apple MIT License", + false, + false + ], + "AMPAS": [ + "Academy of Motion Picture Arts and Sciences BSD", + false, + false + ], + "ANTLR-PD": [ + "ANTLR Software Rights Notice", + false, + false + ], + "APAFML": [ + "Adobe Postscript AFM License", + false, + false + ], + "APL-1.0": [ + "Adaptive Public License 1.0", + true, + false + ], + "APSL-1.0": [ + "Apple Public Source License 1.0", + true, + false + ], + "APSL-1.1": [ + "Apple Public Source License 1.1", + true, + false + ], + "APSL-1.2": [ + "Apple Public Source License 1.2", + true, + false + ], + "APSL-2.0": [ + "Apple Public Source License 2.0", + true, + false + ], + "Abstyles": [ + "Abstyles License", + false, + false + ], + "Adobe-2006": [ + "Adobe Systems Incorporated Source Code License Agreement", + false, + false + ], + "Adobe-Glyph": [ + "Adobe Glyph List License", + false, + false + ], + "Afmparse": [ + "Afmparse License", + false, + false + ], + "Aladdin": [ + "Aladdin Free Public License", + false, + false + ], + "Apache-1.0": [ + "Apache License 1.0", + false, + false + ], + "Apache-1.1": [ + "Apache License 1.1", + true, + false + ], + "Apache-2.0": [ + "Apache License 2.0", + true, + false + ], + "Artistic-1.0": [ + "Artistic License 1.0", + true, + false + ], + "Artistic-1.0-Perl": [ + "Artistic License 1.0 (Perl)", + true, + false + ], + "Artistic-1.0-cl8": [ + "Artistic License 1.0 w/clause 8", + true, + false + ], + "Artistic-2.0": [ + "Artistic License 2.0", + true, + false + ], + "BSD-1-Clause": [ + "BSD 1-Clause License", + false, + false + ], + "BSD-2-Clause": [ + "BSD 2-Clause \"Simplified\" License", + true, + 
false + ], + "BSD-2-Clause-FreeBSD": [ + "BSD 2-Clause FreeBSD License", + false, + false + ], + "BSD-2-Clause-NetBSD": [ + "BSD 2-Clause NetBSD License", + false, + false + ], + "BSD-2-Clause-Patent": [ + "BSD-2-Clause Plus Patent License", + true, + false + ], + "BSD-3-Clause": [ + "BSD 3-Clause \"New\" or \"Revised\" License", + true, + false + ], + "BSD-3-Clause-Attribution": [ + "BSD with attribution", + false, + false + ], + "BSD-3-Clause-Clear": [ + "BSD 3-Clause Clear License", + false, + false + ], + "BSD-3-Clause-LBNL": [ + "Lawrence Berkeley National Labs BSD variant license", + false, + false + ], + "BSD-3-Clause-No-Nuclear-License": [ + "BSD 3-Clause No Nuclear License", + false, + false + ], + "BSD-3-Clause-No-Nuclear-License-2014": [ + "BSD 3-Clause No Nuclear License 2014", + false, + false + ], + "BSD-3-Clause-No-Nuclear-Warranty": [ + "BSD 3-Clause No Nuclear Warranty", + false, + false + ], + "BSD-4-Clause": [ + "BSD 4-Clause \"Original\" or \"Old\" License", + false, + false + ], + "BSD-4-Clause-UC": [ + "BSD-4-Clause (University of California-Specific)", + false, + false + ], + "BSD-Protection": [ + "BSD Protection License", + false, + false + ], + "BSD-Source-Code": [ + "BSD Source Code Attribution", + false, + false + ], + "BSL-1.0": [ + "Boost Software License 1.0", + true, + false + ], + "Bahyph": [ + "Bahyph License", + false, + false + ], + "Barr": [ + "Barr License", + false, + false + ], + "Beerware": [ + "Beerware License", + false, + false + ], + "BitTorrent-1.0": [ + "BitTorrent Open Source License v1.0", + false, + false + ], + "BitTorrent-1.1": [ + "BitTorrent Open Source License v1.1", + false, + false + ], + "Borceux": [ + "Borceux license", + false, + false + ], + "CATOSL-1.1": [ + "Computer Associates Trusted Open Source License 1.1", + true, + false + ], + "CC-BY-1.0": [ + "Creative Commons Attribution 1.0", + false, + false + ], + "CC-BY-2.0": [ + "Creative Commons Attribution 2.0", + false, + false + ], + "CC-BY-2.5": [ + "Creative Commons Attribution 2.5", + false, + false + ], + "CC-BY-3.0": [ + "Creative Commons Attribution 3.0", + false, + false + ], + "CC-BY-4.0": [ + "Creative Commons Attribution 4.0", + false, + false + ], + "CC-BY-NC-1.0": [ + "Creative Commons Attribution Non Commercial 1.0", + false, + false + ], + "CC-BY-NC-2.0": [ + "Creative Commons Attribution Non Commercial 2.0", + false, + false + ], + "CC-BY-NC-2.5": [ + "Creative Commons Attribution Non Commercial 2.5", + false, + false + ], + "CC-BY-NC-3.0": [ + "Creative Commons Attribution Non Commercial 3.0", + false, + false + ], + "CC-BY-NC-4.0": [ + "Creative Commons Attribution Non Commercial 4.0", + false, + false + ], + "CC-BY-NC-ND-1.0": [ + "Creative Commons Attribution Non Commercial No Derivatives 1.0", + false, + false + ], + "CC-BY-NC-ND-2.0": [ + "Creative Commons Attribution Non Commercial No Derivatives 2.0", + false, + false + ], + "CC-BY-NC-ND-2.5": [ + "Creative Commons Attribution Non Commercial No Derivatives 2.5", + false, + false + ], + "CC-BY-NC-ND-3.0": [ + "Creative Commons Attribution Non Commercial No Derivatives 3.0", + false, + false + ], + "CC-BY-NC-ND-4.0": [ + "Creative Commons Attribution Non Commercial No Derivatives 4.0", + false, + false + ], + "CC-BY-NC-SA-1.0": [ + "Creative Commons Attribution Non Commercial Share Alike 1.0", + false, + false + ], + "CC-BY-NC-SA-2.0": [ + "Creative Commons Attribution Non Commercial Share Alike 2.0", + false, + false + ], + "CC-BY-NC-SA-2.5": [ + "Creative Commons Attribution Non Commercial Share Alike 
2.5", + false, + false + ], + "CC-BY-NC-SA-3.0": [ + "Creative Commons Attribution Non Commercial Share Alike 3.0", + false, + false + ], + "CC-BY-NC-SA-4.0": [ + "Creative Commons Attribution Non Commercial Share Alike 4.0", + false, + false + ], + "CC-BY-ND-1.0": [ + "Creative Commons Attribution No Derivatives 1.0", + false, + false + ], + "CC-BY-ND-2.0": [ + "Creative Commons Attribution No Derivatives 2.0", + false, + false + ], + "CC-BY-ND-2.5": [ + "Creative Commons Attribution No Derivatives 2.5", + false, + false + ], + "CC-BY-ND-3.0": [ + "Creative Commons Attribution No Derivatives 3.0", + false, + false + ], + "CC-BY-ND-4.0": [ + "Creative Commons Attribution No Derivatives 4.0", + false, + false + ], + "CC-BY-SA-1.0": [ + "Creative Commons Attribution Share Alike 1.0", + false, + false + ], + "CC-BY-SA-2.0": [ + "Creative Commons Attribution Share Alike 2.0", + false, + false + ], + "CC-BY-SA-2.5": [ + "Creative Commons Attribution Share Alike 2.5", + false, + false + ], + "CC-BY-SA-3.0": [ + "Creative Commons Attribution Share Alike 3.0", + false, + false + ], + "CC-BY-SA-4.0": [ + "Creative Commons Attribution Share Alike 4.0", + false, + false + ], + "CC0-1.0": [ + "Creative Commons Zero v1.0 Universal", + false, + false + ], + "CDDL-1.0": [ + "Common Development and Distribution License 1.0", + true, + false + ], + "CDDL-1.1": [ + "Common Development and Distribution License 1.1", + false, + false + ], + "CDLA-Permissive-1.0": [ + "Community Data License Agreement Permissive 1.0", + false, + false + ], + "CDLA-Sharing-1.0": [ + "Community Data License Agreement Sharing 1.0", + false, + false + ], + "CECILL-1.0": [ + "CeCILL Free Software License Agreement v1.0", + false, + false + ], + "CECILL-1.1": [ + "CeCILL Free Software License Agreement v1.1", + false, + false + ], + "CECILL-2.0": [ + "CeCILL Free Software License Agreement v2.0", + false, + false + ], + "CECILL-2.1": [ + "CeCILL Free Software License Agreement v2.1", + true, + false + ], + "CECILL-B": [ + "CeCILL-B Free Software License Agreement", + false, + false + ], + "CECILL-C": [ + "CeCILL-C Free Software License Agreement", + false, + false + ], + "CNRI-Jython": [ + "CNRI Jython License", + false, + false + ], + "CNRI-Python": [ + "CNRI Python License", + true, + false + ], + "CNRI-Python-GPL-Compatible": [ + "CNRI Python Open Source GPL Compatible License Agreement", + false, + false + ], + "CPAL-1.0": [ + "Common Public Attribution License 1.0", + true, + false + ], + "CPL-1.0": [ + "Common Public License 1.0", + true, + false + ], + "CPOL-1.02": [ + "Code Project Open License 1.02", + false, + false + ], + "CUA-OPL-1.0": [ + "CUA Office Public License v1.0", + true, + false + ], + "Caldera": [ + "Caldera License", + false, + false + ], + "ClArtistic": [ + "Clarified Artistic License", + false, + false + ], + "Condor-1.1": [ + "Condor Public License v1.1", + false, + false + ], + "Crossword": [ + "Crossword License", + false, + false + ], + "CrystalStacker": [ + "CrystalStacker License", + false, + false + ], + "Cube": [ + "Cube License", + false, + false + ], + "D-FSL-1.0": [ + "Deutsche Freie Software Lizenz", + false, + false + ], + "DOC": [ + "DOC License", + false, + false + ], + "DSDP": [ + "DSDP License", + false, + false + ], + "Dotseqn": [ + "Dotseqn License", + false, + false + ], + "ECL-1.0": [ + "Educational Community License v1.0", + true, + false + ], + "ECL-2.0": [ + "Educational Community License v2.0", + true, + false + ], + "EFL-1.0": [ + "Eiffel Forum License v1.0", + true, + false + ], + 
"EFL-2.0": [ + "Eiffel Forum License v2.0", + true, + false + ], + "EPL-1.0": [ + "Eclipse Public License 1.0", + true, + false + ], + "EPL-2.0": [ + "Eclipse Public License 2.0", + true, + false + ], + "EUDatagrid": [ + "EU DataGrid Software License", + true, + false + ], + "EUPL-1.0": [ + "European Union Public License 1.0", + false, + false + ], + "EUPL-1.1": [ + "European Union Public License 1.1", + true, + false + ], + "EUPL-1.2": [ + "European Union Public License 1.2", + true, + false + ], + "Entessa": [ + "Entessa Public License v1.0", + true, + false + ], + "ErlPL-1.1": [ + "Erlang Public License v1.1", + false, + false + ], + "Eurosym": [ + "Eurosym License", + false, + false + ], + "FSFAP": [ + "FSF All Permissive License", + false, + false + ], + "FSFUL": [ + "FSF Unlimited License", + false, + false + ], + "FSFULLR": [ + "FSF Unlimited License (with License Retention)", + false, + false + ], + "FTL": [ + "Freetype Project License", + false, + false + ], + "Fair": [ + "Fair License", + true, + false + ], + "Frameworx-1.0": [ + "Frameworx Open License 1.0", + true, + false + ], + "FreeImage": [ + "FreeImage Public License v1.0", + false, + false + ], + "GFDL-1.1": [ + "GNU Free Documentation License v1.1", + false, + true + ], + "GFDL-1.1-only": [ + "GNU Free Documentation License v1.1 only", + false, + false + ], + "GFDL-1.1-or-later": [ + "GNU Free Documentation License v1.1 or later", + false, + false + ], + "GFDL-1.2": [ + "GNU Free Documentation License v1.2", + false, + true + ], + "GFDL-1.2-only": [ + "GNU Free Documentation License v1.2 only", + false, + false + ], + "GFDL-1.2-or-later": [ + "GNU Free Documentation License v1.2 or later", + false, + false + ], + "GFDL-1.3": [ + "GNU Free Documentation License v1.3", + false, + true + ], + "GFDL-1.3-only": [ + "GNU Free Documentation License v1.3 only", + false, + false + ], + "GFDL-1.3-or-later": [ + "GNU Free Documentation License v1.3 or later", + false, + false + ], + "GL2PS": [ + "GL2PS License", + false, + false + ], + "GPL-1.0": [ + "GNU General Public License v1.0 only", + false, + true + ], + "GPL-1.0+": [ + "GNU General Public License v1.0 or later", + false, + true + ], + "GPL-1.0-only": [ + "GNU General Public License v1.0 only", + false, + false + ], + "GPL-1.0-or-later": [ + "GNU General Public License v1.0 or later", + false, + false + ], + "GPL-2.0": [ + "GNU General Public License v2.0 only", + true, + true + ], + "GPL-2.0+": [ + "GNU General Public License v2.0 or later", + true, + true + ], + "GPL-2.0-only": [ + "GNU General Public License v2.0 only", + true, + false + ], + "GPL-2.0-or-later": [ + "GNU General Public License v2.0 or later", + true, + false + ], + "GPL-2.0-with-GCC-exception": [ + "GNU General Public License v2.0 w/GCC Runtime Library exception", + false, + true + ], + "GPL-2.0-with-autoconf-exception": [ + "GNU General Public License v2.0 w/Autoconf exception", + false, + true + ], + "GPL-2.0-with-bison-exception": [ + "GNU General Public License v2.0 w/Bison exception", + false, + true + ], + "GPL-2.0-with-classpath-exception": [ + "GNU General Public License v2.0 w/Classpath exception", + false, + true + ], + "GPL-2.0-with-font-exception": [ + "GNU General Public License v2.0 w/Font exception", + false, + true + ], + "GPL-3.0": [ + "GNU General Public License v3.0 only", + true, + true + ], + "GPL-3.0+": [ + "GNU General Public License v3.0 or later", + true, + true + ], + "GPL-3.0-only": [ + "GNU General Public License v3.0 only", + true, + false + ], + "GPL-3.0-or-later": [ + "GNU 
General Public License v3.0 or later", + true, + false + ], + "GPL-3.0-with-GCC-exception": [ + "GNU General Public License v3.0 w/GCC Runtime Library exception", + true, + true + ], + "GPL-3.0-with-autoconf-exception": [ + "GNU General Public License v3.0 w/Autoconf exception", + false, + true + ], + "Giftware": [ + "Giftware License", + false, + false + ], + "Glide": [ + "3dfx Glide License", + false, + false + ], + "Glulxe": [ + "Glulxe License", + false, + false + ], + "HPND": [ + "Historical Permission Notice and Disclaimer", + true, + false + ], + "HaskellReport": [ + "Haskell Language Report License", + false, + false + ], + "IBM-pibs": [ + "IBM PowerPC Initialization and Boot Software", + false, + false + ], + "ICU": [ + "ICU License", + false, + false + ], + "IJG": [ + "Independent JPEG Group License", + false, + false + ], + "IPA": [ + "IPA Font License", + true, + false + ], + "IPL-1.0": [ + "IBM Public License v1.0", + true, + false + ], + "ISC": [ + "ISC License", + true, + false + ], + "ImageMagick": [ + "ImageMagick License", + false, + false + ], + "Imlib2": [ + "Imlib2 License", + false, + false + ], + "Info-ZIP": [ + "Info-ZIP License", + false, + false + ], + "Intel": [ + "Intel Open Source License", + true, + false + ], + "Intel-ACPI": [ + "Intel ACPI Software License Agreement", + false, + false + ], + "Interbase-1.0": [ + "Interbase Public License v1.0", + false, + false + ], + "JSON": [ + "JSON License", + false, + false + ], + "JasPer-2.0": [ + "JasPer License", + false, + false + ], + "LAL-1.2": [ + "Licence Art Libre 1.2", + false, + false + ], + "LAL-1.3": [ + "Licence Art Libre 1.3", + false, + false + ], + "LGPL-2.0": [ + "GNU Library General Public License v2 only", + true, + true + ], + "LGPL-2.0+": [ + "GNU Library General Public License v2 or later", + true, + true + ], + "LGPL-2.0-only": [ + "GNU Library General Public License v2 only", + true, + false + ], + "LGPL-2.0-or-later": [ + "GNU Library General Public License v2 or later", + true, + false + ], + "LGPL-2.1": [ + "GNU Lesser General Public License v2.1 only", + true, + true + ], + "LGPL-2.1+": [ + "GNU Library General Public License v2 or later", + true, + true + ], + "LGPL-2.1-only": [ + "GNU Lesser General Public License v2.1 only", + true, + false + ], + "LGPL-2.1-or-later": [ + "GNU Lesser General Public License v2.1 or later", + true, + false + ], + "LGPL-3.0": [ + "GNU Lesser General Public License v3.0 only", + true, + true + ], + "LGPL-3.0+": [ + "GNU Lesser General Public License v3.0 or later", + true, + true + ], + "LGPL-3.0-only": [ + "GNU Lesser General Public License v3.0 only", + true, + false + ], + "LGPL-3.0-or-later": [ + "GNU Lesser General Public License v3.0 or later", + true, + false + ], + "LGPLLR": [ + "Lesser General Public License For Linguistic Resources", + false, + false + ], + "LPL-1.0": [ + "Lucent Public License Version 1.0", + true, + false + ], + "LPL-1.02": [ + "Lucent Public License v1.02", + true, + false + ], + "LPPL-1.0": [ + "LaTeX Project Public License v1.0", + false, + false + ], + "LPPL-1.1": [ + "LaTeX Project Public License v1.1", + false, + false + ], + "LPPL-1.2": [ + "LaTeX Project Public License v1.2", + false, + false + ], + "LPPL-1.3a": [ + "LaTeX Project Public License v1.3a", + false, + false + ], + "LPPL-1.3c": [ + "LaTeX Project Public License v1.3c", + true, + false + ], + "Latex2e": [ + "Latex2e License", + false, + false + ], + "Leptonica": [ + "Leptonica License", + false, + false + ], + "LiLiQ-P-1.1": [ + "Licence Libre du Québec – 
Permissive version 1.1", + true, + false + ], + "LiLiQ-R-1.1": [ + "Licence Libre du Québec – Réciprocité version 1.1", + true, + false + ], + "LiLiQ-Rplus-1.1": [ + "Licence Libre du Québec – Réciprocité forte version 1.1", + true, + false + ], + "Libpng": [ + "libpng License", + false, + false + ], + "MIT": [ + "MIT License", + true, + false + ], + "MIT-CMU": [ + "CMU License", + false, + false + ], + "MIT-advertising": [ + "Enlightenment License (e16)", + false, + false + ], + "MIT-enna": [ + "enna License", + false, + false + ], + "MIT-feh": [ + "feh License", + false, + false + ], + "MITNFA": [ + "MIT +no-false-attribs license", + false, + false + ], + "MPL-1.0": [ + "Mozilla Public License 1.0", + true, + false + ], + "MPL-1.1": [ + "Mozilla Public License 1.1", + true, + false + ], + "MPL-2.0": [ + "Mozilla Public License 2.0", + true, + false + ], + "MPL-2.0-no-copyleft-exception": [ + "Mozilla Public License 2.0 (no copyleft exception)", + true, + false + ], + "MS-PL": [ + "Microsoft Public License", + true, + false + ], + "MS-RL": [ + "Microsoft Reciprocal License", + true, + false + ], + "MTLL": [ + "Matrix Template Library License", + false, + false + ], + "MakeIndex": [ + "MakeIndex License", + false, + false + ], + "MirOS": [ + "MirOS License", + true, + false + ], + "Motosoto": [ + "Motosoto License", + true, + false + ], + "Multics": [ + "Multics License", + true, + false + ], + "Mup": [ + "Mup License", + false, + false + ], + "NASA-1.3": [ + "NASA Open Source Agreement 1.3", + true, + false + ], + "NBPL-1.0": [ + "Net Boolean Public License v1", + false, + false + ], + "NCSA": [ + "University of Illinois/NCSA Open Source License", + true, + false + ], + "NGPL": [ + "Nethack General Public License", + true, + false + ], + "NLOD-1.0": [ + "Norwegian Licence for Open Government Data", + false, + false + ], + "NLPL": [ + "No Limit Public License", + false, + false + ], + "NOSL": [ + "Netizen Open Source License", + false, + false + ], + "NPL-1.0": [ + "Netscape Public License v1.0", + false, + false + ], + "NPL-1.1": [ + "Netscape Public License v1.1", + false, + false + ], + "NPOSL-3.0": [ + "Non-Profit Open Software License 3.0", + true, + false + ], + "NRL": [ + "NRL License", + false, + false + ], + "NTP": [ + "NTP License", + true, + false + ], + "Naumen": [ + "Naumen Public License", + true, + false + ], + "Net-SNMP": [ + "Net-SNMP License", + false, + false + ], + "NetCDF": [ + "NetCDF license", + false, + false + ], + "Newsletr": [ + "Newsletr License", + false, + false + ], + "Nokia": [ + "Nokia Open Source License", + true, + false + ], + "Noweb": [ + "Noweb License", + false, + false + ], + "Nunit": [ + "Nunit License", + false, + true + ], + "OCCT-PL": [ + "Open CASCADE Technology Public License", + false, + false + ], + "OCLC-2.0": [ + "OCLC Research Public License 2.0", + true, + false + ], + "ODbL-1.0": [ + "ODC Open Database License v1.0", + false, + false + ], + "OFL-1.0": [ + "SIL Open Font License 1.0", + false, + false + ], + "OFL-1.1": [ + "SIL Open Font License 1.1", + true, + false + ], + "OGTSL": [ + "Open Group Test Suite License", + true, + false + ], + "OLDAP-1.1": [ + "Open LDAP Public License v1.1", + false, + false + ], + "OLDAP-1.2": [ + "Open LDAP Public License v1.2", + false, + false + ], + "OLDAP-1.3": [ + "Open LDAP Public License v1.3", + false, + false + ], + "OLDAP-1.4": [ + "Open LDAP Public License v1.4", + false, + false + ], + "OLDAP-2.0": [ + "Open LDAP Public License v2.0 (or possibly 2.0A and 2.0B)", + false, + false + ], + 
"OLDAP-2.0.1": [ + "Open LDAP Public License v2.0.1", + false, + false + ], + "OLDAP-2.1": [ + "Open LDAP Public License v2.1", + false, + false + ], + "OLDAP-2.2": [ + "Open LDAP Public License v2.2", + false, + false + ], + "OLDAP-2.2.1": [ + "Open LDAP Public License v2.2.1", + false, + false + ], + "OLDAP-2.2.2": [ + "Open LDAP Public License 2.2.2", + false, + false + ], + "OLDAP-2.3": [ + "Open LDAP Public License v2.3", + false, + false + ], + "OLDAP-2.4": [ + "Open LDAP Public License v2.4", + false, + false + ], + "OLDAP-2.5": [ + "Open LDAP Public License v2.5", + false, + false + ], + "OLDAP-2.6": [ + "Open LDAP Public License v2.6", + false, + false + ], + "OLDAP-2.7": [ + "Open LDAP Public License v2.7", + false, + false + ], + "OLDAP-2.8": [ + "Open LDAP Public License v2.8", + false, + false + ], + "OML": [ + "Open Market License", + false, + false + ], + "OPL-1.0": [ + "Open Public License v1.0", + false, + false + ], + "OSET-PL-2.1": [ + "OSET Public License version 2.1", + true, + false + ], + "OSL-1.0": [ + "Open Software License 1.0", + true, + false + ], + "OSL-1.1": [ + "Open Software License 1.1", + false, + false + ], + "OSL-2.0": [ + "Open Software License 2.0", + true, + false + ], + "OSL-2.1": [ + "Open Software License 2.1", + true, + false + ], + "OSL-3.0": [ + "Open Software License 3.0", + true, + false + ], + "OpenSSL": [ + "OpenSSL License", + false, + false + ], + "PDDL-1.0": [ + "ODC Public Domain Dedication & License 1.0", + false, + false + ], + "PHP-3.0": [ + "PHP License v3.0", + true, + false + ], + "PHP-3.01": [ + "PHP License v3.01", + false, + false + ], + "Plexus": [ + "Plexus Classworlds License", + false, + false + ], + "PostgreSQL": [ + "PostgreSQL License", + true, + false + ], + "Python-2.0": [ + "Python License 2.0", + true, + false + ], + "QPL-1.0": [ + "Q Public License 1.0", + true, + false + ], + "Qhull": [ + "Qhull License", + false, + false + ], + "RHeCos-1.1": [ + "Red Hat eCos Public License v1.1", + false, + false + ], + "RPL-1.1": [ + "Reciprocal Public License 1.1", + true, + false + ], + "RPL-1.5": [ + "Reciprocal Public License 1.5", + true, + false + ], + "RPSL-1.0": [ + "RealNetworks Public Source License v1.0", + true, + false + ], + "RSA-MD": [ + "RSA Message-Digest License ", + false, + false + ], + "RSCPL": [ + "Ricoh Source Code Public License", + true, + false + ], + "Rdisc": [ + "Rdisc License", + false, + false + ], + "Ruby": [ + "Ruby License", + false, + false + ], + "SAX-PD": [ + "Sax Public Domain Notice", + false, + false + ], + "SCEA": [ + "SCEA Shared Source License", + false, + false + ], + "SGI-B-1.0": [ + "SGI Free Software License B v1.0", + false, + false + ], + "SGI-B-1.1": [ + "SGI Free Software License B v1.1", + false, + false + ], + "SGI-B-2.0": [ + "SGI Free Software License B v2.0", + false, + false + ], + "SISSL": [ + "Sun Industry Standards Source License v1.1", + true, + false + ], + "SISSL-1.2": [ + "Sun Industry Standards Source License v1.2", + false, + false + ], + "SMLNJ": [ + "Standard ML of New Jersey License", + false, + false + ], + "SMPPL": [ + "Secure Messaging Protocol Public License", + false, + false + ], + "SNIA": [ + "SNIA Public License 1.1", + false, + false + ], + "SPL-1.0": [ + "Sun Public License v1.0", + true, + false + ], + "SWL": [ + "Scheme Widget Library (SWL) Software License Agreement", + false, + false + ], + "Saxpath": [ + "Saxpath License", + false, + false + ], + "Sendmail": [ + "Sendmail License", + false, + false + ], + "SimPL-2.0": [ + "Simple Public License 
2.0", + true, + false + ], + "Sleepycat": [ + "Sleepycat License", + true, + false + ], + "Spencer-86": [ + "Spencer License 86", + false, + false + ], + "Spencer-94": [ + "Spencer License 94", + false, + false + ], + "Spencer-99": [ + "Spencer License 99", + false, + false + ], + "StandardML-NJ": [ + "Standard ML of New Jersey License", + false, + true + ], + "SugarCRM-1.1.3": [ + "SugarCRM Public License v1.1.3", + false, + false + ], + "TCL": [ + "TCL/TK License", + false, + false + ], + "TCP-wrappers": [ + "TCP Wrappers License", + false, + false + ], + "TMate": [ + "TMate Open Source License", + false, + false + ], + "TORQUE-1.1": [ + "TORQUE v2.5+ Software License v1.1", + false, + false + ], + "TOSL": [ + "Trusster Open Source License", + false, + false + ], + "UPL-1.0": [ + "Universal Permissive License v1.0", + true, + false + ], + "Unicode-DFS-2015": [ + "Unicode License Agreement - Data Files and Software (2015)", + false, + false + ], + "Unicode-DFS-2016": [ + "Unicode License Agreement - Data Files and Software (2016)", + false, + false + ], + "Unicode-TOU": [ + "Unicode Terms of Use", + false, + false + ], + "Unlicense": [ + "The Unlicense", + false, + false + ], + "VOSTROM": [ + "VOSTROM Public License for Open Source", + false, + false + ], + "VSL-1.0": [ + "Vovida Software License v1.0", + true, + false + ], + "Vim": [ + "Vim License", + false, + false + ], + "W3C": [ + "W3C Software Notice and License (2002-12-31)", + true, + false + ], + "W3C-19980720": [ + "W3C Software Notice and License (1998-07-20)", + false, + false + ], + "W3C-20150513": [ + "W3C Software Notice and Document License (2015-05-13)", + false, + false + ], + "WTFPL": [ + "Do What The F*ck You Want To Public License", + false, + false + ], + "Watcom-1.0": [ + "Sybase Open Watcom Public License 1.0", + true, + false + ], + "Wsuipa": [ + "Wsuipa License", + false, + false + ], + "X11": [ + "X11 License", + false, + false + ], + "XFree86-1.1": [ + "XFree86 License 1.1", + false, + false + ], + "XSkat": [ + "XSkat License", + false, + false + ], + "Xerox": [ + "Xerox License", + false, + false + ], + "Xnet": [ + "X.Net License", + true, + false + ], + "YPL-1.0": [ + "Yahoo! Public License v1.0", + false, + false + ], + "YPL-1.1": [ + "Yahoo! 
Public License v1.1", + false, + false + ], + "ZPL-1.1": [ + "Zope Public License 1.1", + false, + false + ], + "ZPL-2.0": [ + "Zope Public License 2.0", + true, + false + ], + "ZPL-2.1": [ + "Zope Public License 2.1", + false, + false + ], + "Zed": [ + "Zed License", + false, + false + ], + "Zend-2.0": [ + "Zend License v2.0", + false, + false + ], + "Zimbra-1.3": [ + "Zimbra Public License v1.3", + false, + false + ], + "Zimbra-1.4": [ + "Zimbra Public License v1.4", + false, + false + ], + "Zlib": [ + "zlib License", + true, + false + ], + "bzip2-1.0.5": [ + "bzip2 and libbzip2 License v1.0.5", + false, + false + ], + "bzip2-1.0.6": [ + "bzip2 and libbzip2 License v1.0.6", + false, + false + ], + "curl": [ + "curl License", + false, + false + ], + "diffmark": [ + "diffmark license", + false, + false + ], + "dvipdfm": [ + "dvipdfm License", + false, + false + ], + "eCos-2.0": [ + "eCos license version 2.0", + false, + true + ], + "eGenix": [ + "eGenix.com Public License 1.1.0", + false, + false + ], + "gSOAP-1.3b": [ + "gSOAP Public License v1.3b", + false, + false + ], + "gnuplot": [ + "gnuplot License", + false, + false + ], + "iMatix": [ + "iMatix Standard Function Library Agreement", + false, + false + ], + "libtiff": [ + "libtiff License", + false, + false + ], + "mpich2": [ + "mpich2 License", + false, + false + ], + "psfrag": [ + "psfrag License", + false, + false + ], + "psutils": [ + "psutils License", + false, + false + ], + "wxWindows": [ + "wxWindows Library License", + false, + true + ], + "xinetd": [ + "xinetd License", + false, + false + ], + "xpp": [ + "XPP License", + false, + false + ], + "zlib-acknowledgement": [ + "zlib/libpng License with Acknowledgement", + false, + false + ] +} diff --git a/src/poetry/core/spdx/helpers.py b/src/poetry/core/spdx/helpers.py new file mode 100644 index 0000000..00d4bc6 --- /dev/null +++ b/src/poetry/core/spdx/helpers.py @@ -0,0 +1,50 @@ +from __future__ import annotations + +import functools +import json +import os + +from poetry.core.spdx.license import License + + +def license_by_id(identifier: str) -> License: + if not identifier: + raise ValueError("A license identifier is required") + + licenses = _load_licenses() + return licenses.get( + identifier.lower(), License(identifier, identifier, False, False) + ) + + +@functools.lru_cache() +def _load_licenses() -> dict[str, License]: + licenses = {} + licenses_file = os.path.join(os.path.dirname(__file__), "data", "licenses.json") + + with open(licenses_file, encoding="utf-8") as f: + data = json.loads(f.read()) + + for name, license_info in data.items(): + license = License(name, license_info[0], license_info[1], license_info[2]) + licenses[name.lower()] = license + + full_name = license_info[0].lower() + if full_name in licenses: + existing_license = licenses[full_name] + if not existing_license.is_deprecated: + continue + + licenses[full_name] = license + + # Add a Proprietary license for non-standard licenses + licenses["proprietary"] = License("Proprietary", "Proprietary", False, False) + + return licenses + + +if __name__ == "__main__": + from poetry.core.spdx.updater import Updater + + updater = Updater() + updater.dump() diff --git a/src/poetry/core/spdx/license.py b/src/poetry/core/spdx/license.py new file mode 100644 index 0000000..901a1cb --- /dev/null +++ b/src/poetry/core/spdx/license.py @@ -0,0 +1,162 @@ +from __future__ import annotations + +from collections import namedtuple + + +class License(namedtuple("License", "id name is_osi_approved is_deprecated")): + id: str + 
name: str + is_osi_approved: bool + is_deprecated: bool + + CLASSIFIER_SUPPORTED = { + # Not OSI Approved + "Aladdin", + "CC0-1.0", + "CECILL-B", + "CECILL-C", + "NPL-1.0", + "NPL-1.1", + # OSI Approved + "AFPL", + "AFL-1.1", + "AFL-1.2", + "AFL-2.0", + "AFL-2.1", + "AFL-3.0", + "Apache-1.1", + "Apache-2.0", + "APSL-1.1", + "APSL-1.2", + "APSL-2.0", + "Artistic-1.0", + "Artistic-2.0", + "AAL", + "AGPL-3.0", + "AGPL-3.0-only", + "AGPL-3.0-or-later", + "BSL-1.0", + "BSD-2-Clause", + "BSD-3-Clause", + "CDDL-1.0", + "CECILL-2.1", + "CPL-1.0", + "EFL-1.0", + "EFL-2.0", + "EPL-1.0", + "EPL-2.0", + "EUPL-1.1", + "EUPL-1.2", + "GPL-2.0", + "GPL-2.0+", + "GPL-2.0-only", + "GPL-2.0-or-later", + "GPL-3.0", + "GPL-3.0+", + "GPL-3.0-only", + "GPL-3.0-or-later", + "LGPL-2.0", + "LGPL-2.0+", + "LGPL-2.0-only", + "LGPL-2.0-or-later", + "LGPL-3.0", + "LGPL-3.0+", + "LGPL-3.0-only", + "LGPL-3.0-or-later", + "MIT", + "MPL-1.0", + "MPL-1.1", + "MPL-1.2", + "Nokia", + "W3C", + "ZPL-1.0", + "ZPL-2.0", + "ZPL-2.1", + } + + CLASSIFIER_NAMES = { + # Not OSI Approved + "AFPL": "Aladdin Free Public License (AFPL)", + "CC0-1.0": "CC0 1.0 Universal (CC0 1.0) Public Domain Dedication", + "CECILL-B": "CeCILL-B Free Software License Agreement (CECILL-B)", + "CECILL-C": "CeCILL-C Free Software License Agreement (CECILL-C)", + "NPL-1.0": "Netscape Public License (NPL)", + "NPL-1.1": "Netscape Public License (NPL)", + # OSI Approved + "AFL-1.1": "Academic Free License (AFL)", + "AFL-1.2": "Academic Free License (AFL)", + "AFL-2.0": "Academic Free License (AFL)", + "AFL-2.1": "Academic Free License (AFL)", + "AFL-3.0": "Academic Free License (AFL)", + "Apache-1.1": "Apache Software License", + "Apache-2.0": "Apache Software License", + "APSL-1.1": "Apple Public Source License", + "APSL-1.2": "Apple Public Source License", + "APSL-2.0": "Apple Public Source License", + "Artistic-1.0": "Artistic License", + "Artistic-2.0": "Artistic License", + "AAL": "Attribution Assurance License", + "AGPL-3.0": "GNU Affero General Public License v3", + "AGPL-3.0-only": "GNU Affero General Public License v3", + "AGPL-3.0-or-later": "GNU Affero General Public License v3 or later (AGPLv3+)", + "BSL-1.0": "Boost Software License 1.0 (BSL-1.0)", + "BSD-2-Clause": "BSD License", + "BSD-3-Clause": "BSD License", + "CDDL-1.0": "Common Development and Distribution License 1.0 (CDDL-1.0)", + "CECILL-2.1": "CEA CNRS Inria Logiciel Libre License, version 2.1 (CeCILL-2.1)", + "CPL-1.0": "Common Public License", + "EPL-1.0": "Eclipse Public License 1.0 (EPL-1.0)", + "EFL-1.0": "Eiffel Forum License", + "EFL-2.0": "Eiffel Forum License", + "EUPL-1.1": "European Union Public Licence 1.1 (EUPL 1.1)", + "EUPL-1.2": "European Union Public Licence 1.2 (EUPL 1.2)", + "GPL-2.0": "GNU General Public License v2 (GPLv2)", + "GPL-2.0-only": "GNU General Public License v2 (GPLv2)", + "GPL-2.0+": "GNU General Public License v2 or later (GPLv2+)", + "GPL-2.0-or-later": "GNU General Public License v2 or later (GPLv2+)", + "GPL-3.0": "GNU General Public License v3 (GPLv3)", + "GPL-3.0-only": "GNU General Public License v3 (GPLv3)", + "GPL-3.0+": "GNU General Public License v3 or later (GPLv3+)", + "GPL-3.0-or-later": "GNU General Public License v3 or later (GPLv3+)", + "LGPL-2.0": "GNU Lesser General Public License v2 (LGPLv2)", + "LGPL-2.0-only": "GNU Lesser General Public License v2 (LGPLv2)", + "LGPL-2.0+": "GNU Lesser General Public License v2 or later (LGPLv2+)", + "LGPL-2.0-or-later": "GNU Lesser General Public License v2 or later (LGPLv2+)", + "LGPL-3.0": "GNU 
Lesser General Public License v3 (LGPLv3)", + "LGPL-3.0-only": "GNU Lesser General Public License v3 (LGPLv3)", + "LGPL-3.0+": "GNU Lesser General Public License v3 or later (LGPLv3+)", + "LGPL-3.0-or-later": "GNU Lesser General Public License v3 or later (LGPLv3+)", + "MPL-1.0": "Mozilla Public License 1.0 (MPL)", + "MPL-1.1": "Mozilla Public License 1.1 (MPL 1.1)", + "MPL-2.0": "Mozilla Public License 2.0 (MPL 2.0)", + "W3C": "W3C License", + "ZPL-1.1": "Zope Public License", + "ZPL-2.0": "Zope Public License", + "ZPL-2.1": "Zope Public License", + } + + @property + def classifier(self) -> str: + parts = ["License"] + + if self.is_osi_approved: + parts.append("OSI Approved") + + name = self.classifier_name + if name is not None: + parts.append(name) + + return " :: ".join(parts) + + @property + def classifier_name(self) -> str | None: + if self.id not in self.CLASSIFIER_SUPPORTED: + if self.is_osi_approved: + return None + + return "Other/Proprietary License" + + if self.id in self.CLASSIFIER_NAMES: + return self.CLASSIFIER_NAMES[self.id] + + return self.name diff --git a/src/poetry/core/spdx/updater.py b/src/poetry/core/spdx/updater.py new file mode 100644 index 0000000..9f6ff37 --- /dev/null +++ b/src/poetry/core/spdx/updater.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +import json +import os + +from typing import Any +from urllib.request import urlopen + + +class Updater: + BASE_URL = "https://raw.githubusercontent.com/spdx/license-list-data/master/json/" + + def __init__(self, base_url: str = BASE_URL) -> None: + self._base_url = base_url + + def dump(self, file: str | None = None) -> None: + if file is None: + file = os.path.join(os.path.dirname(__file__), "data", "licenses.json") + + licenses_url = self._base_url + "licenses.json" + + with open(file, "w", encoding="utf-8") as f: + f.write( + json.dumps(self.get_licenses(licenses_url), indent=2, sort_keys=True) + ) + + def get_licenses(self, url: str) -> dict[str, Any]: + licenses = {} + with urlopen(url) as r: + data = json.loads(r.read().decode()) + + for info in data["licenses"]: + licenses[info["licenseId"]] = [ + info["name"], + info["isOsiApproved"], + info["isDeprecatedLicenseId"], + ] + + return licenses diff --git a/src/poetry/core/toml/__init__.py b/src/poetry/core/toml/__init__.py new file mode 100644 index 0000000..3ce1689 --- /dev/null +++ b/src/poetry/core/toml/__init__.py @@ -0,0 +1,7 @@ +from __future__ import annotations + +from poetry.core.toml.exceptions import TOMLError +from poetry.core.toml.file import TOMLFile + + +__all__ = ["TOMLError", "TOMLFile"] diff --git a/src/poetry/core/toml/exceptions.py b/src/poetry/core/toml/exceptions.py new file mode 100644 index 0000000..efa189c --- /dev/null +++ b/src/poetry/core/toml/exceptions.py @@ -0,0 +1,9 @@ +from __future__ import annotations + +from tomlkit.exceptions import TOMLKitError + +from poetry.core.exceptions import PoetryCoreException + + +class TOMLError(TOMLKitError, PoetryCoreException): # type: ignore[misc] + pass diff --git a/src/poetry/core/toml/file.py b/src/poetry/core/toml/file.py new file mode 100644 index 0000000..7bc1cd7 --- /dev/null +++ b/src/poetry/core/toml/file.py @@ -0,0 +1,42 @@ +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Any + +from tomlkit.toml_file import TOMLFile as BaseTOMLFile + + +if TYPE_CHECKING: + from tomlkit.toml_document import TOMLDocument + + +class TOMLFile(BaseTOMLFile): # type: ignore[misc] + def __init__(self, path: str | Path) -> None: 
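As a quick orientation to the two SPDX helpers above, here is a minimal sketch of how license_by_id() and the classifier property behave (this assumes poetry-core is importable; the custom identifier is a made-up placeholder, not something from this repository):

    from poetry.core.spdx.helpers import license_by_id

    # Lookup is case-insensitive and also matches full license names.
    mit = license_by_id("mit")
    assert mit.id == "MIT" and mit.is_osi_approved
    print(mit.classifier)     # License :: OSI Approved :: MIT License

    # Unknown identifiers fall back to a non-OSI, non-deprecated License.
    custom = license_by_id("My-Company-EULA")   # hypothetical identifier
    print(custom.classifier)  # License :: Other/Proprietary License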
+ if isinstance(path, str): + path = Path(path) + super().__init__(path.as_posix()) + self.__path = path + + @property + def path(self) -> Path: + return self.__path + + def exists(self) -> bool: + return self.__path.exists() + + def read(self) -> TOMLDocument: + from tomlkit.exceptions import TOMLKitError + + from poetry.core.toml import TOMLError + + try: + return super().read() + except (ValueError, TOMLKitError) as e: + raise TOMLError(f"Invalid TOML file {self.path.as_posix()}: {e}") + + def __getattr__(self, item: str) -> Any: + return getattr(self.__path, item) + + def __str__(self) -> str: + return self.__path.as_posix() diff --git a/src/poetry/core/utils/__init__.py b/src/poetry/core/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/utils/_compat.py b/src/poetry/core/utils/_compat.py new file mode 100644 index 0000000..7b3f59e --- /dev/null +++ b/src/poetry/core/utils/_compat.py @@ -0,0 +1,6 @@ +from __future__ import annotations + +import sys + + +WINDOWS = sys.platform == "win32" diff --git a/src/poetry/core/utils/helpers.py b/src/poetry/core/utils/helpers.py new file mode 100644 index 0000000..dd41b24 --- /dev/null +++ b/src/poetry/core/utils/helpers.py @@ -0,0 +1,107 @@ +from __future__ import annotations + +import os +import shutil +import stat +import tempfile +import unicodedata +import warnings + +from contextlib import contextmanager +from pathlib import Path +from typing import Any +from typing import Iterator + +from packaging.utils import canonicalize_name + +from poetry.core.version.pep440 import PEP440Version + + +def combine_unicode(string: str) -> str: + return unicodedata.normalize("NFC", string) + + +def module_name(name: str) -> str: + return canonicalize_name(name).replace("-", "_") + + +def normalize_version(version: str) -> str: + warnings.warn( + "normalize_version() is deprecated. 
Use Version.parse().to_string() instead.", + DeprecationWarning, + stacklevel=2, + ) + return PEP440Version.parse(version).to_string() + + +@contextmanager +def temporary_directory(*args: Any, **kwargs: Any) -> Iterator[str]: + name = tempfile.mkdtemp(*args, **kwargs) + yield name + safe_rmtree(name) + + +def parse_requires(requires: str) -> list[str]: + lines = requires.split("\n") + + requires_dist = [] + in_section = False + current_marker = None + for line in lines: + line = line.strip() + if not line: + if in_section: + in_section = False + + continue + + if line.startswith("["): + # extras or conditional dependencies + marker = line.lstrip("[").rstrip("]") + if ":" not in marker: + extra, marker = marker, "" + else: + extra, marker = marker.split(":") + + if extra: + if marker: + marker = f'{marker} and extra == "{extra}"' + else: + marker = f'extra == "{extra}"' + + if marker: + current_marker = marker + + continue + + if current_marker: + line = f"{line} ; {current_marker}" + + requires_dist.append(line) + + return requires_dist + + +def _on_rm_error(func: Any, path: str | Path, exc_info: Any) -> None: + if not os.path.exists(path): + return + + os.chmod(path, stat.S_IWRITE) + func(path) + + +def safe_rmtree(path: str | Path) -> None: + if Path(path).is_symlink(): + return os.unlink(str(path)) + + shutil.rmtree(path, onerror=_on_rm_error) + + +def readme_content_type(path: str | Path) -> str: + suffix = Path(path).suffix + if suffix == ".rst": + return "text/x-rst" + elif suffix in [".md", ".markdown"]: + return "text/markdown" + else: + return "text/plain" diff --git a/src/poetry/core/utils/patterns.py b/src/poetry/core/utils/patterns.py new file mode 100644 index 0000000..c2d9d9b --- /dev/null +++ b/src/poetry/core/utils/patterns.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +import re + + +wheel_file_re = re.compile( + r"""^(?P(?P.+?)(-(?P\d.+?))?) + ((-(?P\d.*?))?-(?P.+?)-(?P.+?)-(?P.+?) 
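The parse_requires() helper above turns an egg-info style requires.txt body into PEP 508 requirement strings; a minimal sketch of that conversion (the input below is a hypothetical example, not taken from this repository):

    from poetry.core.utils.helpers import parse_requires

    requires = 'requests\n[dev:python_version < "3.8"]\nimportlib-metadata\n'
    print(parse_requires(requires))
    # ['requests', 'importlib-metadata ; python_version < "3.8" and extra == "dev"']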
+ \.whl|\.dist-info)$""", + re.VERBOSE, +) diff --git a/src/poetry/core/utils/toml_file.py b/src/poetry/core/utils/toml_file.py new file mode 100644 index 0000000..5e64029 --- /dev/null +++ b/src/poetry/core/utils/toml_file.py @@ -0,0 +1,20 @@ +from __future__ import annotations + +from typing import Any + +from poetry.core.toml import TOMLFile + + +class TomlFile(TOMLFile): + @classmethod + def __new__(cls: type[TOMLFile], *args: Any, **kwargs: Any) -> TomlFile: + import warnings + + this_import = f"{cls.__module__}.{cls.__name__}" + new_import = f"{TOMLFile.__module__}.{TOMLFile.__name__}" + warnings.warn( + f"Use of {this_import} has been deprecated, use {new_import} instead.", + category=DeprecationWarning, + stacklevel=2, + ) + return super().__new__(cls) # type: ignore[no-any-return,misc] diff --git a/src/poetry/core/vcs/__init__.py b/src/poetry/core/vcs/__init__.py new file mode 100644 index 0000000..f4096ec --- /dev/null +++ b/src/poetry/core/vcs/__init__.py @@ -0,0 +1,35 @@ +from __future__ import annotations + +import os +import subprocess + +from pathlib import Path + +from poetry.core.vcs.git import Git + + +def get_vcs(directory: Path) -> Git | None: + working_dir = Path.cwd() + os.chdir(str(directory.resolve())) + + vcs: Git | None + + try: + from poetry.core.vcs.git import executable + + git_dir = ( + subprocess.check_output( + [executable(), "rev-parse", "--show-toplevel"], stderr=subprocess.STDOUT + ) + .decode() + .strip() + ) + + vcs = Git(Path(git_dir)) + + except (subprocess.CalledProcessError, OSError, RuntimeError): + vcs = None + finally: + os.chdir(str(working_dir)) + + return vcs diff --git a/src/poetry/core/vcs/git.py b/src/poetry/core/vcs/git.py new file mode 100644 index 0000000..aeccdf3 --- /dev/null +++ b/src/poetry/core/vcs/git.py @@ -0,0 +1,384 @@ +from __future__ import annotations + +import re +import subprocess + +from collections import namedtuple +from pathlib import Path +from typing import Any + +from poetry.core.utils._compat import WINDOWS + + +PROTOCOL = r"\w+" +USER = r"[a-zA-Z0-9_.-]+" +RESOURCE = r"[a-zA-Z0-9_.-]+" +PORT = r"\d+" +PATH = r"[\w~.\-/\\\$]+" +NAME = r"[\w~.\-]+" +REV = r"[^@#]+?" +SUBDIR = r"[\w\-/\\]+" + +PATTERNS = [ + re.compile( + r"^(git\+)?" + r"(?Phttps?|git|ssh|rsync|file)://" + rf"(?:(?P{USER})@)?" + rf"(?P{RESOURCE})?" + rf"(:(?P{PORT}))?" + rf"(?P[:/\\]({PATH}[/\\])?" + rf"((?P{NAME}?)(\.git|[/\\])?)?)" + r"(?:" + r"#egg=?.+" + r"|" + rf"#(?:egg=.+?&subdirectory=|subdirectory=)(?P{SUBDIR})" + r"|" + rf"[@#](?P{REV})(?:[&#](?:egg=.+?|(?:egg=.+?&subdirectory=|subdirectory=)(?P{SUBDIR})))?" + r")?" + r"$" + ), + re.compile( + r"(git\+)?" + rf"((?P{PROTOCOL})://)" + rf"(?:(?P{USER})@)?" + rf"(?P{RESOURCE}:?)" + rf"(:(?P{PORT}))?" + rf"(?P({PATH})" + rf"(?P{NAME})(\.git|/)?)" + r"(?:" + r"#egg=?.+" + r"|" + rf"#(?:egg=.+?&subdirectory=|subdirectory=)(?P{SUBDIR})" + r"|" + rf"[@#](?P{REV})(?:[&#](?:egg=.+?|(?:egg=.+?&subdirectory=|subdirectory=)(?P{SUBDIR})))?" + r")?" + r"$" + ), + re.compile( + rf"^(?:(?P{USER})@)?" + rf"(?P{RESOURCE})" + rf"(:(?P{PORT}))?" + rf"(?P([:/]{PATH}/)" + rf"(?P{NAME})(\.git|/)?)" + r"(?:" + r"#egg=.+?" + r"|" + rf"#(?:egg=.+?&subdirectory=|subdirectory=)(?P{SUBDIR})" + r"|" + rf"[@#](?P{REV})(?:[&#](?:egg=.+?&subdirectory=|subdirectory=)(?P{SUBDIR}))?" + r")?" + r"$" + ), + re.compile( + rf"((?P{USER})@)?" 
+ rf"(?P{RESOURCE})" + r"[:/]{{1,2}}" + rf"(?P({PATH})" + rf"(?P{NAME})(\.git|/)?)" + r"(?:" + r"#egg=?.+" + r"|" + rf"#(?:egg=.+?&subdirectory=|subdirectory=)(?P{SUBDIR})" + r"|" + rf"[@#](?P{REV})(?:[&#](?:egg=.+?|(?:egg=.+?&subdirectory=|subdirectory=)(?P{SUBDIR})))?" + r")?" + r"$" + ), +] + + +class GitError(RuntimeError): + pass + + +class ParsedUrl: + def __init__( + self, + protocol: str | None, + resource: str | None, + pathname: str | None, + user: str | None, + port: str | None, + name: str | None, + rev: str | None, + subdirectory: str | None = None, + ) -> None: + self.protocol = protocol + self.resource = resource + self.pathname = pathname + self.user = user + self.port = port + self.name = name + self.rev = rev + self.subdirectory = subdirectory + + @classmethod + def parse(cls, url: str) -> ParsedUrl: + for pattern in PATTERNS: + m = pattern.match(url) + if m: + groups = m.groupdict() + return ParsedUrl( + groups.get("protocol"), + groups.get("resource"), + groups.get("pathname"), + groups.get("user"), + groups.get("port"), + groups.get("name"), + groups.get("rev"), + groups.get("rev_subdirectory") or groups.get("subdirectory"), + ) + + raise ValueError(f'Invalid git url "{url}"') + + @property + def url(self) -> str: + protocol = f"{self.protocol}://" if self.protocol else "" + user = f"{self.user}@" if self.user else "" + port = f":{self.port}" if self.port else "" + path = "/" + (self.pathname or "").lstrip(":/") + return f"{protocol}{user}{self.resource}{port}{path}" + + def format(self) -> str: + return self.url + + def __str__(self) -> str: + return self.format() + + +GitUrl = namedtuple("GitUrl", ["url", "revision", "subdirectory"]) + + +_executable: str | None = None + + +def executable() -> str: + global _executable + + if _executable is not None: + return _executable + + if WINDOWS: + # Finding git via where.exe + where = "%WINDIR%\\System32\\where.exe" + paths = subprocess.check_output( + [where, "git"], shell=True, encoding="oem" + ).split("\n") + for path in paths: + if not path: + continue + + _path = Path(path.strip()) + try: + _path.relative_to(Path.cwd()) + except ValueError: + _executable = str(_path) + + break + else: + _executable = "git" + + if _executable is None: + raise RuntimeError("Unable to find a valid git executable") + + return _executable + + +def _reset_executable() -> None: + global _executable + + _executable = None + + +class GitConfig: + def __init__(self, requires_git_presence: bool = False) -> None: + self._config = {} + + try: + config_list = subprocess.check_output( + [executable(), "config", "-l"], stderr=subprocess.STDOUT + ).decode() + + m = re.findall("(?ms)^([^=]+)=(.*?)$", config_list) + if m: + for group in m: + self._config[group[0]] = group[1] + except (subprocess.CalledProcessError, OSError): + if requires_git_presence: + raise + + def get(self, key: Any, default: Any | None = None) -> Any: + return self._config.get(key, default) + + def __getitem__(self, item: Any) -> Any: + return self._config[item] + + +class Git: + def __init__(self, work_dir: Path | None = None) -> None: + self._config = GitConfig(requires_git_presence=True) + self._work_dir = work_dir + + @classmethod + def normalize_url(cls, url: str) -> GitUrl: + parsed = ParsedUrl.parse(url) + + formatted = re.sub(r"^git\+", "", url) + if parsed.rev: + formatted = re.sub(rf"[#@]{parsed.rev}(?=[#&]?)(?!\=)", "", formatted) + + if parsed.subdirectory: + formatted = re.sub( + rf"[#&]subdirectory={parsed.subdirectory}$", "", formatted + ) + + altered = parsed.format() 
!= formatted + + if altered: + if re.match(r"^git\+https?", url) and re.match( + r"^/?:[^0-9]", parsed.pathname or "" + ): + normalized = re.sub(r"git\+(.*:[^:]+):(.*)", "\\1/\\2", url) + elif re.match(r"^git\+file", url): + normalized = re.sub(r"git\+", "", url) + else: + normalized = re.sub(r"^(?:git\+)?ssh://", "", url) + else: + normalized = parsed.format() + + return GitUrl( + re.sub(r"#[^#]*$", "", normalized), parsed.rev, parsed.subdirectory + ) + + @property + def config(self) -> GitConfig: + return self._config + + @property + def version(self) -> tuple[int, int, int]: + output = self.run("version") + version = re.search(r"(\d+)\.(\d+)\.(\d+)", output) + if not version: + return (0, 0, 0) + return int(version.group(1)), int(version.group(2)), int(version.group(3)) + + def clone(self, repository: str, dest: Path) -> str: + self._check_parameter(repository) + cmd = [ + "clone", + "--filter=blob:none", + "--recurse-submodules", + "--", + repository, + str(dest), + ] + # Blobless clones introduced in Git 2.17 + if self.version < (2, 17): + cmd.remove("--filter=blob:none") + return self.run(*cmd) + + def checkout(self, rev: str, folder: Path | None = None) -> str: + args = [] + if folder is None and self._work_dir: + folder = self._work_dir + + if folder: + args += [ + "--git-dir", + (folder / ".git").as_posix(), + "--work-tree", + folder.as_posix(), + ] + + self._check_parameter(rev) + + args += ["checkout", "--recurse-submodules", rev] + + return self.run(*args) + + def rev_parse(self, rev: str, folder: Path | None = None) -> str: + args = [] + if folder is None and self._work_dir: + folder = self._work_dir + + self._check_parameter(rev) + + # We need "^0" (an alternative to "^{commit}") to ensure that the + # commit SHA of the commit the tag points to is returned, even in + # the case of annotated tags. + # + # We deliberately avoid the "^{commit}" syntax itself as on some + # platforms (cygwin/msys to be specific), the braces are interpreted + # as special characters and would require escaping, while on others + # they should not be escaped. 
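The URL handling above is easiest to see on a concrete case; a small sketch (the repository URL is only an example and no git checkout is needed, since normalize_url is a classmethod):

    from poetry.core.vcs.git import Git, ParsedUrl

    url = "git+https://github.com/python-poetry/poetry-core.git@1.4.0"

    parsed = ParsedUrl.parse(url)
    print(parsed.name, parsed.rev)        # poetry-core 1.4.0

    normalized = Git.normalize_url(url)
    print(normalized.url)                 # https://github.com/python-poetry/poetry-core.git
    print(normalized.revision)            # 1.4.0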
+ args += ["rev-parse", rev + "^0"] + + return self.run(*args, folder=folder) + + def get_current_branch(self, folder: Path | None = None) -> str: + if folder is None and self._work_dir: + folder = self._work_dir + + output = self.run("symbolic-ref", "--short", "HEAD", folder=folder) + + return output.strip() + + def get_ignored_files(self, folder: Path | None = None) -> list[str]: + args = [] + if folder is None and self._work_dir: + folder = self._work_dir + + if folder: + args += [ + "--git-dir", + (folder / ".git").as_posix(), + "--work-tree", + folder.as_posix(), + ] + + args += ["ls-files", "--others", "-i", "--exclude-standard"] + output = self.run(*args) + + return output.strip().split("\n") + + def remote_urls(self, folder: Path | None = None) -> dict[str, str]: + output = self.run( + "config", "--get-regexp", r"remote\..*\.url", folder=folder + ).strip() + + urls = {} + for url in output.splitlines(): + name, url = url.split(" ", 1) + urls[name.strip()] = url.strip() + + return urls + + def remote_url(self, folder: Path | None = None) -> str: + urls = self.remote_urls(folder=folder) + + return urls.get("remote.origin.url", urls[list(urls.keys())[0]]) + + def run(self, *args: Any, **kwargs: Any) -> str: + folder = kwargs.pop("folder", None) + if folder: + args = ( + "--git-dir", + (folder / ".git").as_posix(), + "--work-tree", + folder.as_posix(), + ) + args + + return ( + subprocess.check_output( + [executable()] + list(args), stderr=subprocess.STDOUT + ) + .decode() + .strip() + ) + + def _check_parameter(self, parameter: str) -> None: + """ + Checks a git parameter to avoid unwanted code execution. + """ + if parameter.strip().startswith("-"): + raise GitError(f"Invalid Git parameter: {parameter}") diff --git a/src/poetry/core/version/__init__.py b/src/poetry/core/version/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/src/poetry/core/version/exceptions.py b/src/poetry/core/version/exceptions.py new file mode 100644 index 0000000..752fada --- /dev/null +++ b/src/poetry/core/version/exceptions.py @@ -0,0 +1,5 @@ +from __future__ import annotations + + +class InvalidVersion(ValueError): + pass diff --git a/src/poetry/core/version/grammars/__init__.py b/src/poetry/core/version/grammars/__init__.py new file mode 100644 index 0000000..caf504b --- /dev/null +++ b/src/poetry/core/version/grammars/__init__.py @@ -0,0 +1,10 @@ +from __future__ import annotations + +from pathlib import Path + + +GRAMMAR_DIR = Path(__file__).parent + +GRAMMAR_PEP_508_CONSTRAINTS = GRAMMAR_DIR / "pep508.lark" + +GRAMMAR_PEP_508_MARKERS = GRAMMAR_DIR / "markers.lark" diff --git a/src/poetry/core/version/grammars/markers.lark b/src/poetry/core/version/grammars/markers.lark new file mode 100644 index 0000000..e0079c2 --- /dev/null +++ b/src/poetry/core/version/grammars/markers.lark @@ -0,0 +1,36 @@ +start: marker + +marker: _atom (BOOL_OP _atom)* +_atom: item | (L_PAREN marker R_PAREN) +item: (MARKER_NAME MARKER_OP _marker_value) | (_marker_value MARKER_OP MARKER_NAME) +_marker_value: SINGLE_QUOTED_STRING | ESCAPED_STRING + +MARKER_NAME: "implementation_version" + | "platform_python_implementation" + | "implementation_name" + | "python_full_version" + | "platform_release" + | "platform_version" + | "platform_machine" + | "platform_system" + | "python_version" + | "sys_platform" + | "os_name" + | "os.name" + | "sys.platform" + | "platform.version" + | "platform.machine" + | "platform.python_implementation" + | "python_implementation" + | "extra" +MARKER_OP: "===" | "==" | ">=" | "<=" | ">" 
| "<" | "!=" | "~=" | "not in" | "in" +SINGLE_QUOTED_STRING: /'([^'])*'/ +QUOTED_STRING: /"([^"])*"/ +MARKER_VALUE: /(.+?)/ +BOOL_OP: "and" | "or" +L_PAREN: "(" +R_PAREN: ")" + +%import common.WS_INLINE +%import common.ESCAPED_STRING +%ignore WS_INLINE diff --git a/src/poetry/core/version/grammars/pep508.lark b/src/poetry/core/version/grammars/pep508.lark new file mode 100644 index 0000000..0f32ff3 --- /dev/null +++ b/src/poetry/core/version/grammars/pep508.lark @@ -0,0 +1,29 @@ +start: _requirement + +_requirement: _full_name (_MARKER_SEPARATOR marker_spec)? +_full_name: NAME _extras? (version_specification | _url)? +_extras: _L_BRACKET _extra? _R_BRACKET +_extra: EXTRA (_COMMA EXTRA)* +version_specification: (_version_many | _L_PAREN _version_many _R_PAREN) +_version_many: _single_version (_COMMA _single_version)* +_single_version: LEGACY_VERSION_CONSTRAINT +_url: _AT URI +marker_spec: marker + +NAME: /[a-zA-Z][a-zA-Z0-9-_.]*/ +FULL_NAME: NAME +EXTRA: NAME +VERSION_CONSTRAINT: /(~=|==|!=|<=|>=|<|>|===)((?:(?<====)\s*[^\s]*)|(?:(?<===|!=)\s*v?(?:[0-9]+!)?[0-9]+(?:\.[0-9]+)*(?:[-_.]?(a|b|c|rc|alpha|beta|pre|preview)[-_.]?[0-9]*)?(?:(?:-[0-9]+)|(?:[-_.]?(post|rev|r)[-_.]?[0-9]*))?(?:(?:[-_.]?dev[-_.]?[0-9]*)?(?:\+[a-z0-9]+(?:[-_.][a-z0-9]+)*)? # local|\.\*)?)|(?:(?<=~=)\s*v?(?:[0-9]+!)?[0-9]+(?:\.[0-9]+)+(?:[-_.]?(a|b|c|rc|alpha|beta|pre|preview)[-_.]?[0-9]*)?(?:(?:-[0-9]+)|(?:[-_.]?(post|rev|r)[-_.]?[0-9]*))?(?:[-_.]?dev[-_.]?[0-9]*)?)|(?:(?=|<|>)\s*[^,;\s)]*/i +URI: /[^ ]+/ +_MARKER_SEPARATOR: ";" +_L_PAREN: "(" +_R_PAREN: ")" +_L_BRACKET: "[" +_R_BRACKET: "]" +_COMMA: "," +_AT: "@" + +%import .markers.marker +%import common.WS_INLINE +%ignore WS_INLINE diff --git a/src/poetry/core/version/helpers.py b/src/poetry/core/version/helpers.py new file mode 100644 index 0000000..7e72ecc --- /dev/null +++ b/src/poetry/core/version/helpers.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +from poetry.core.constraints.version import Version +from poetry.core.constraints.version import VersionUnion +from poetry.core.constraints.version import parse_constraint + + +if TYPE_CHECKING: + from poetry.core.constraints.version import VersionConstraint + +PYTHON_VERSION = [ + "2.7.*", + "3.0.*", + "3.1.*", + "3.2.*", + "3.3.*", + "3.4.*", + "3.5.*", + "3.6.*", + "3.7.*", + "3.8.*", + "3.9.*", + "3.10.*", + "3.11.*", +] + + +def format_python_constraint(constraint: VersionConstraint) -> str: + """ + This helper will help in transforming + disjunctive constraint into proper constraint. 
+ """ + if isinstance(constraint, Version): + if constraint.precision >= 3: + return f"=={str(constraint)}" + + # Transform 3.6 or 3 + if constraint.precision == 2: + # 3.6 + constraint = parse_constraint(f"~{constraint.major}.{constraint.minor}") + else: + constraint = parse_constraint(f"^{constraint.major}.0") + + if not isinstance(constraint, VersionUnion): + return str(constraint) + + formatted = [] + accepted = [] + + for version in PYTHON_VERSION: + version_constraint = parse_constraint(version) + matches = constraint.allows_any(version_constraint) + if not matches: + formatted.append("!=" + version) + else: + accepted.append(version) + + # Checking lower bound + low = accepted[0] + + formatted.insert(0, ">=" + ".".join(low.split(".")[:2])) + + return ", ".join(formatted) diff --git a/src/poetry/core/version/markers.py b/src/poetry/core/version/markers.py new file mode 100644 index 0000000..7f1b642 --- /dev/null +++ b/src/poetry/core/version/markers.py @@ -0,0 +1,926 @@ +from __future__ import annotations + +import itertools +import re + +from typing import TYPE_CHECKING +from typing import Any +from typing import Callable +from typing import Iterable + +from poetry.core.constraints.version import VersionConstraint +from poetry.core.version.grammars import GRAMMAR_PEP_508_MARKERS +from poetry.core.version.parser import Parser + + +if TYPE_CHECKING: + from lark import Tree + + from poetry.core.constraints.generic import BaseConstraint + + +class InvalidMarker(ValueError): + """ + An invalid marker was found, users should refer to PEP 508. + """ + + +class UndefinedComparison(ValueError): + """ + An invalid operation was attempted on a value that doesn't support it. + """ + + +class UndefinedEnvironmentName(ValueError): + """ + A name was attempted to be used that does not exist inside of the + environment. 
+ """ + + +ALIASES = { + "os.name": "os_name", + "sys.platform": "sys_platform", + "platform.version": "platform_version", + "platform.machine": "platform_machine", + "platform.python_implementation": "platform_python_implementation", + "python_implementation": "platform_python_implementation", +} + +PYTHON_VERSION_MARKERS = {"python_version", "python_full_version"} + +# Parser: PEP 508 Environment Markers +_parser = Parser(GRAMMAR_PEP_508_MARKERS, "lalr") + + +class BaseMarker: + def intersect(self, other: BaseMarker) -> BaseMarker: + raise NotImplementedError() + + def union(self, other: BaseMarker) -> BaseMarker: + raise NotImplementedError() + + def is_any(self) -> bool: + return False + + def is_empty(self) -> bool: + return False + + def validate(self, environment: dict[str, Any] | None) -> bool: + raise NotImplementedError() + + def without_extras(self) -> BaseMarker: + raise NotImplementedError() + + def exclude(self, marker_name: str) -> BaseMarker: + raise NotImplementedError() + + def only(self, *marker_names: str) -> BaseMarker: + raise NotImplementedError() + + def invert(self) -> BaseMarker: + raise NotImplementedError() + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} {str(self)}>" + + +class AnyMarker(BaseMarker): + def intersect(self, other: BaseMarker) -> BaseMarker: + return other + + def union(self, other: BaseMarker) -> BaseMarker: + return self + + def is_any(self) -> bool: + return True + + def is_empty(self) -> bool: + return False + + def validate(self, environment: dict[str, Any] | None) -> bool: + return True + + def without_extras(self) -> BaseMarker: + return self + + def exclude(self, marker_name: str) -> BaseMarker: + return self + + def only(self, *marker_names: str) -> BaseMarker: + return self + + def invert(self) -> EmptyMarker: + return EmptyMarker() + + def __str__(self) -> str: + return "" + + def __repr__(self) -> str: + return "" + + def __hash__(self) -> int: + return hash(("", "")) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, BaseMarker): + return NotImplemented + + return isinstance(other, AnyMarker) + + +class EmptyMarker(BaseMarker): + def intersect(self, other: BaseMarker) -> BaseMarker: + return self + + def union(self, other: BaseMarker) -> BaseMarker: + return other + + def is_any(self) -> bool: + return False + + def is_empty(self) -> bool: + return True + + def validate(self, environment: dict[str, Any] | None) -> bool: + return False + + def without_extras(self) -> BaseMarker: + return self + + def exclude(self, marker_name: str) -> EmptyMarker: + return self + + def only(self, *marker_names: str) -> EmptyMarker: + return self + + def invert(self) -> AnyMarker: + return AnyMarker() + + def __str__(self) -> str: + return "" + + def __repr__(self) -> str: + return "" + + def __hash__(self) -> int: + return hash(("", "")) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, BaseMarker): + return NotImplemented + + return isinstance(other, EmptyMarker) + + +class SingleMarker(BaseMarker): + _CONSTRAINT_RE = re.compile(r"(?i)^(~=|!=|>=?|<=?|==?=?|in|not in)?\s*(.+)$") + _VERSION_LIKE_MARKER_NAME = { + "python_version", + "python_full_version", + "platform_release", + } + + def __init__( + self, name: str, constraint: str | BaseConstraint | VersionConstraint + ) -> None: + from poetry.core.constraints.generic import ( + parse_constraint as parse_generic_constraint, + ) + from poetry.core.constraints.version import ( + parse_constraint as parse_version_constraint, + ) + + 
self._constraint: BaseConstraint | VersionConstraint + self._parser: Callable[[str], BaseConstraint | VersionConstraint] + self._name = ALIASES.get(name, name) + constraint_string = str(constraint) + + # Extract operator and value + m = self._CONSTRAINT_RE.match(constraint_string) + if m is None: + raise InvalidMarker(f"Invalid marker '{constraint_string}'") + + self._operator = m.group(1) + if self._operator is None: + self._operator = "==" + + self._value = m.group(2) + self._parser = parse_generic_constraint + + if name in self._VERSION_LIKE_MARKER_NAME: + self._parser = parse_version_constraint + + if self._operator in {"in", "not in"}: + versions = [] + for v in re.split("[ ,]+", self._value): + split = v.split(".") + if len(split) in [1, 2]: + split.append("*") + op = "" if self._operator == "in" else "!=" + else: + op = "==" if self._operator == "in" else "!=" + + versions.append(op + ".".join(split)) + + glue = ", " + if self._operator == "in": + glue = " || " + + self._constraint = self._parser(glue.join(versions)) + else: + self._constraint = self._parser(constraint_string) + else: + # if we have a in/not in operator we split the constraint + # into a union/multi-constraint of single constraint + if self._operator in {"in", "not in"}: + op, glue = ("==", " || ") if self._operator == "in" else ("!=", ", ") + values = re.split("[ ,]+", self._value) + constraint_string = glue.join(f"{op} {value}" for value in values) + + self._constraint = self._parser(constraint_string) + + @property + def name(self) -> str: + return self._name + + @property + def constraint(self) -> BaseConstraint | VersionConstraint: + return self._constraint + + @property + def operator(self) -> str: + return self._operator + + @property + def value(self) -> str: + return self._value + + def intersect(self, other: BaseMarker) -> BaseMarker: + if isinstance(other, SingleMarker): + return MultiMarker.of(self, other) + + return other.intersect(self) + + def union(self, other: BaseMarker) -> BaseMarker: + if isinstance(other, SingleMarker): + if self == other: + return self + + if self == other.invert(): + return AnyMarker() + + return MarkerUnion.of(self, other) + + return other.union(self) + + def validate(self, environment: dict[str, Any] | None) -> bool: + if environment is None: + return True + + if self._name not in environment: + return True + + # The type of constraint returned by the parser matches our constraint: either + # both are BaseConstraint or both are VersionConstraint. But it's hard for mypy + # to know that. 
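A short sketch of how an "in" marker built by the constructor above evaluates against an environment (the environment values are hypothetical; parse_marker is defined later in this module):

    from poetry.core.version.markers import parse_marker

    marker = parse_marker('python_version in "2.7 3.6"')
    print(marker.validate({"python_version": "3.6"}))   # True
    print(marker.validate({"python_version": "3.7"}))   # False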
+ constraint = self._parser(environment[self._name]) + return self._constraint.allows(constraint) # type: ignore[arg-type] + + def without_extras(self) -> BaseMarker: + return self.exclude("extra") + + def exclude(self, marker_name: str) -> BaseMarker: + if self.name == marker_name: + return AnyMarker() + + return self + + def only(self, *marker_names: str) -> SingleMarker | AnyMarker: + if self.name not in marker_names: + return AnyMarker() + + return self + + def invert(self) -> BaseMarker: + if self._operator in ("===", "=="): + operator = "!=" + elif self._operator == "!=": + operator = "==" + elif self._operator == ">": + operator = "<=" + elif self._operator == ">=": + operator = "<" + elif self._operator == "<": + operator = ">=" + elif self._operator == "<=": + operator = ">" + elif self._operator == "in": + operator = "not in" + elif self._operator == "not in": + operator = "in" + elif self._operator == "~=": + # This one is more tricky to handle + # since it's technically a multi marker + # so the inverse will be a union of inverse + from poetry.core.constraints.version import VersionRangeConstraint + + if not isinstance(self._constraint, VersionRangeConstraint): + # The constraint must be a version range, otherwise + # it's an internal error + raise RuntimeError( + "The '~=' operator should only represent version ranges" + ) + + min_ = self._constraint.min + min_operator = ">=" if self._constraint.include_min else ">" + max_ = self._constraint.max + max_operator = "<=" if self._constraint.include_max else "<" + + return MultiMarker.of( + SingleMarker(self._name, f"{min_operator} {min_}"), + SingleMarker(self._name, f"{max_operator} {max_}"), + ).invert() + else: + # We should never go there + raise RuntimeError(f"Invalid marker operator '{self._operator}'") + + return parse_marker(f"{self._name} {operator} '{self._value}'") + + def __eq__(self, other: object) -> bool: + if not isinstance(other, SingleMarker): + return False + + return self._name == other.name and self._constraint == other.constraint + + def __hash__(self) -> int: + return hash((self._name, self._constraint)) + + def __str__(self) -> str: + return f'{self._name} {self._operator} "{self._value}"' + + +def _flatten_markers( + markers: Iterable[BaseMarker], + flatten_class: type[MarkerUnion | MultiMarker], +) -> list[BaseMarker]: + flattened = [] + + for marker in markers: + if isinstance(marker, flatten_class): + flattened += _flatten_markers( + marker.markers, # type: ignore[attr-defined] + flatten_class, + ) + else: + flattened.append(marker) + + return flattened + + +class MultiMarker(BaseMarker): + def __init__(self, *markers: BaseMarker) -> None: + self._markers = [] + + flattened_markers = _flatten_markers(markers, MultiMarker) + + for m in flattened_markers: + self._markers.append(m) + + @classmethod + def of(cls, *markers: BaseMarker) -> BaseMarker: + new_markers = _flatten_markers(markers, MultiMarker) + old_markers: list[BaseMarker] = [] + + while old_markers != new_markers: + old_markers = new_markers + new_markers = [] + for marker in old_markers: + if marker in new_markers: + continue + + if marker.is_any(): + continue + + if isinstance(marker, SingleMarker): + intersected = False + for i, mark in enumerate(new_markers): + if isinstance(mark, SingleMarker) and ( + mark.name == marker.name + or {mark.name, marker.name} == PYTHON_VERSION_MARKERS + ): + new_marker = _merge_single_markers(mark, marker, cls) + if new_marker is not None: + new_markers[i] = new_marker + intersected = True + + elif 
isinstance(mark, MarkerUnion): + intersection = mark.intersect(marker) + if isinstance(intersection, SingleMarker): + new_markers[i] = intersection + elif intersection.is_empty(): + return EmptyMarker() + if intersected: + continue + + elif isinstance(marker, MarkerUnion): + for mark in new_markers: + if isinstance(mark, SingleMarker): + intersection = marker.intersect(mark) + if isinstance(intersection, SingleMarker): + marker = intersection + break + elif intersection.is_empty(): + return EmptyMarker() + + new_markers.append(marker) + + if any(m.is_empty() for m in new_markers) or not new_markers: + return EmptyMarker() + + if len(new_markers) == 1: + return new_markers[0] + + return MultiMarker(*new_markers) + + @property + def markers(self) -> list[BaseMarker]: + return self._markers + + def intersect(self, other: BaseMarker) -> BaseMarker: + if other.is_any(): + return self + + if other.is_empty(): + return other + + if isinstance(other, MarkerUnion): + return other.intersect(self) + + new_markers = self._markers + [other] + + return MultiMarker.of(*new_markers) + + def union(self, other: BaseMarker) -> BaseMarker: + if isinstance(other, (SingleMarker, MultiMarker)): + return MarkerUnion.of(self, other) + + return other.union(self) + + def union_simplify(self, other: BaseMarker) -> BaseMarker | None: + """ + In contrast to the standard union method, which prefers to return + a MarkerUnion of MultiMarkers, this version prefers to return + a MultiMarker of MarkerUnions. + + The rationale behind this approach is to find additional simplifications. + In order to avoid endless recursions, this method returns None + if it cannot find a simplification. + """ + if isinstance(other, SingleMarker): + new_markers = [] + for marker in self._markers: + union = marker.union(other) + if not union.is_any(): + new_markers.append(union) + + if len(new_markers) == 1: + return new_markers[0] + if other in new_markers and all( + other == m or isinstance(m, MarkerUnion) and other in m.markers + for m in new_markers + ): + return other + + if not any(isinstance(m, MarkerUnion) for m in new_markers): + return self.of(*new_markers) + + elif isinstance(other, MultiMarker): + common_markers = [ + marker for marker in self.markers if marker in other.markers + ] + + unique_markers = [ + marker for marker in self.markers if marker not in common_markers + ] + if not unique_markers: + return self + + other_unique_markers = [ + marker for marker in other.markers if marker not in common_markers + ] + if not other_unique_markers: + return other + + if common_markers: + unique_union = self.of(*unique_markers).union( + self.of(*other_unique_markers) + ) + if not isinstance(unique_union, MarkerUnion): + return self.of(*common_markers).intersect(unique_union) + + else: + # Usually this operation just complicates things, but the special case + # where it doesn't allows the collapse of adjacent ranges eg + # + # 'python_version >= "3.6" and python_version < "3.6.2"' union + # 'python_version >= "3.6.2" and python_version < "3.7"' -> + # + # 'python_version >= "3.6" and python_version < "3.7"'. 
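The adjacent-range collapse described in the comment above can be reproduced directly with parse_marker; a minimal sketch:

    from poetry.core.version.markers import parse_marker

    m1 = parse_marker('python_version >= "3.6" and python_version < "3.6.2"')
    m2 = parse_marker('python_version >= "3.6.2" and python_version < "3.7"')
    print(m1.union(m2))   # python_version >= "3.6" and python_version < "3.7"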
+ unions = [ + m1.union(m2) for m2 in other_unique_markers for m1 in unique_markers + ] + conjunction = self.of(*unions) + if not isinstance(conjunction, MultiMarker) or not any( + isinstance(m, MarkerUnion) for m in conjunction.markers + ): + return conjunction + + return None + + def validate(self, environment: dict[str, Any] | None) -> bool: + return all(m.validate(environment) for m in self._markers) + + def without_extras(self) -> BaseMarker: + return self.exclude("extra") + + def exclude(self, marker_name: str) -> BaseMarker: + new_markers = [] + + for m in self._markers: + if isinstance(m, SingleMarker) and m.name == marker_name: + # The marker is not relevant since it must be excluded + continue + + marker = m.exclude(marker_name) + + if not marker.is_empty(): + new_markers.append(marker) + + return self.of(*new_markers) + + def only(self, *marker_names: str) -> BaseMarker: + new_markers = [] + + for m in self._markers: + if isinstance(m, SingleMarker) and m.name not in marker_names: + # The marker is not relevant since it's not one we want + continue + + marker = m.only(*marker_names) + + if not marker.is_empty(): + new_markers.append(marker) + + return self.of(*new_markers) + + def invert(self) -> BaseMarker: + markers = [marker.invert() for marker in self._markers] + + return MarkerUnion.of(*markers) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, MultiMarker): + return False + + return set(self._markers) == set(other.markers) + + def __hash__(self) -> int: + h = hash("multi") + for m in self._markers: + h ^= hash(m) + + return h + + def __str__(self) -> str: + elements = [] + for m in self._markers: + if isinstance(m, (SingleMarker, MultiMarker)): + elements.append(str(m)) + else: + elements.append(f"({str(m)})") + + return " and ".join(elements) + + +class MarkerUnion(BaseMarker): + def __init__(self, *markers: BaseMarker) -> None: + self._markers = list(markers) + + @property + def markers(self) -> list[BaseMarker]: + return self._markers + + @classmethod + def of(cls, *markers: BaseMarker) -> BaseMarker: + new_markers = _flatten_markers(markers, MarkerUnion) + old_markers: list[BaseMarker] = [] + + while old_markers != new_markers: + old_markers = new_markers + new_markers = [] + for marker in old_markers: + if marker in new_markers or marker.is_empty(): + continue + + included = False + + if isinstance(marker, SingleMarker): + for i, mark in enumerate(new_markers): + if isinstance(mark, SingleMarker) and ( + mark.name == marker.name + or {mark.name, marker.name} == PYTHON_VERSION_MARKERS + ): + new_marker = _merge_single_markers(mark, marker, cls) + if new_marker is not None: + new_markers[i] = new_marker + included = True + break + + elif isinstance(mark, MultiMarker): + union = mark.union_simplify(marker) + if union is not None: + new_markers[i] = union + included = True + break + + elif isinstance(marker, MultiMarker): + included = False + for i, mark in enumerate(new_markers): + union = marker.union_simplify(mark) + if union is not None: + new_markers[i] = union + included = True + break + + if included: + # flatten again because union_simplify may return a union + new_markers = _flatten_markers(new_markers, MarkerUnion) + else: + new_markers.append(marker) + + if any(m.is_any() for m in new_markers): + return AnyMarker() + + if not new_markers: + return EmptyMarker() + + if len(new_markers) == 1: + return new_markers[0] + + return MarkerUnion(*new_markers) + + def append(self, marker: BaseMarker) -> None: + if marker in self._markers: + return + 
+        self._markers.append(marker)
+
+    def intersect(self, other: BaseMarker) -> BaseMarker:
+        if other.is_any():
+            return self
+
+        if other.is_empty():
+            return other
+
+        new_markers = []
+        if isinstance(other, (SingleMarker, MultiMarker)):
+            for marker in self._markers:
+                intersection = marker.intersect(other)
+
+                if not intersection.is_empty():
+                    new_markers.append(intersection)
+        elif isinstance(other, MarkerUnion):
+            for our_marker in self._markers:
+                for their_marker in other.markers:
+                    intersection = our_marker.intersect(their_marker)
+
+                    if not intersection.is_empty():
+                        new_markers.append(intersection)
+
+        return MarkerUnion.of(*new_markers)
+
+    def union(self, other: BaseMarker) -> BaseMarker:
+        if other.is_any():
+            return other
+
+        if other.is_empty():
+            return self
+
+        new_markers = self._markers + [other]
+
+        return MarkerUnion.of(*new_markers)
+
+    def validate(self, environment: dict[str, Any] | None) -> bool:
+        return any(m.validate(environment) for m in self._markers)
+
+    def without_extras(self) -> BaseMarker:
+        return self.exclude("extra")
+
+    def exclude(self, marker_name: str) -> BaseMarker:
+        new_markers = []
+
+        for m in self._markers:
+            if isinstance(m, SingleMarker) and m.name == marker_name:
+                # The marker is not relevant since it must be excluded
+                continue
+
+            marker = m.exclude(marker_name)
+            new_markers.append(marker)
+
+        if not new_markers:
+            # All markers were the excluded marker.
+            return AnyMarker()
+
+        return self.of(*new_markers)
+
+    def only(self, *marker_names: str) -> BaseMarker:
+        new_markers = []
+
+        for m in self._markers:
+            if isinstance(m, SingleMarker) and m.name not in marker_names:
+                # The marker is not relevant since it's not one we want
+                continue
+
+            marker = m.only(*marker_names)
+
+            if not marker.is_empty():
+                new_markers.append(marker)
+
+        return self.of(*new_markers)
+
+    def invert(self) -> BaseMarker:
+        markers = [marker.invert() for marker in self._markers]
+
+        return MultiMarker.of(*markers)
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, MarkerUnion):
+            return False
+
+        return set(self._markers) == set(other.markers)
+
+    def __hash__(self) -> int:
+        h = hash("union")
+        for m in self._markers:
+            h ^= hash(m)
+
+        return h
+
+    def __str__(self) -> str:
+        return " or ".join(
+            str(m) for m in self._markers if not m.is_any() and not m.is_empty()
+        )
+
+    def is_any(self) -> bool:
+        return any(m.is_any() for m in self._markers)
+
+    def is_empty(self) -> bool:
+        return all(m.is_empty() for m in self._markers)
+
+
+def parse_marker(marker: str) -> BaseMarker:
+    if marker == "<empty>":
+        return EmptyMarker()
+
+    if not marker or marker == "*":
+        return AnyMarker()
+
+    parsed = _parser.parse(marker)
+
+    markers = _compact_markers(parsed.children)
+
+    return markers
+
+
+def _compact_markers(tree_elements: Tree, tree_prefix: str = "") -> BaseMarker:
+    from lark import Token
+
+    groups: list[BaseMarker] = [MultiMarker()]
+    for token in tree_elements:
+        if isinstance(token, Token):
+            if token.type == f"{tree_prefix}BOOL_OP" and token.value == "or":
+                groups.append(MultiMarker())
+
+            continue
+
+        if token.data == "marker":
+            groups[-1] = MultiMarker.of(
+                groups[-1], _compact_markers(token.children, tree_prefix=tree_prefix)
+            )
+        elif token.data == f"{tree_prefix}item":
+            name, op, value = token.children
+            if value.type == f"{tree_prefix}MARKER_NAME":
+                name, value, = (
+                    value,
+                    name,
+                )
+
+            value = value[1:-1]
+            groups[-1] = MultiMarker.of(
+                groups[-1], SingleMarker(str(name), f"{op}{value}")
+            )
+        elif token.data == f"{tree_prefix}BOOL_OP" and token.children[0] == "or":
+            groups.append(MultiMarker())
+
+    for i, group in enumerate(reversed(groups)):
+        if group.is_empty():
+            del groups[len(groups) - 1 - i]
+            continue
+
+        if isinstance(group, MultiMarker) and len(group.markers) == 1:
+            groups[len(groups) - 1 - i] = group.markers[0]
+
+    if not groups:
+        return EmptyMarker()
+
+    if len(groups) == 1:
+        return groups[0]
+
+    return MarkerUnion.of(*groups)
+
+
+def dnf(marker: BaseMarker) -> BaseMarker:
+    """Transforms the marker into DNF (disjunctive normal form)."""
+    if isinstance(marker, MultiMarker):
+        dnf_markers = [dnf(m) for m in marker.markers]
+        sub_marker_lists = [
+            m.markers if isinstance(m, MarkerUnion) else [m] for m in dnf_markers
+        ]
+        return MarkerUnion.of(
+            *[MultiMarker.of(*c) for c in itertools.product(*sub_marker_lists)]
+        )
+    if isinstance(marker, MarkerUnion):
+        return MarkerUnion.of(*[dnf(m) for m in marker.markers])
+    return marker
+
+
+def _merge_single_markers(
+    marker1: SingleMarker,
+    marker2: SingleMarker,
+    merge_class: type[MultiMarker | MarkerUnion],
+) -> BaseMarker | None:
+    if {marker1.name, marker2.name} == PYTHON_VERSION_MARKERS:
+        return _merge_python_version_single_markers(marker1, marker2, merge_class)
+
+    if merge_class == MultiMarker:
+        merge_method = marker1.constraint.intersect
+    else:
+        merge_method = marker1.constraint.union
+    # Markers with the same name have the same constraint type,
+    # but mypy can't see that.
+    result_constraint = merge_method(marker2.constraint)  # type: ignore[arg-type]
+
+    result_marker: BaseMarker | None = None
+    if result_constraint.is_empty():
+        result_marker = EmptyMarker()
+    elif result_constraint.is_any():
+        result_marker = AnyMarker()
+    elif result_constraint == marker1.constraint:
+        result_marker = marker1
+    elif result_constraint == marker2.constraint:
+        result_marker = marker2
+    elif (
+        isinstance(result_constraint, VersionConstraint)
+        and result_constraint.is_simple()
+    ):
+        result_marker = SingleMarker(marker1.name, result_constraint)
+    return result_marker
+
+
+def _merge_python_version_single_markers(
+    marker1: SingleMarker,
+    marker2: SingleMarker,
+    merge_class: type[MultiMarker | MarkerUnion],
+) -> BaseMarker | None:
+    from poetry.core.packages.utils.utils import get_python_constraint_from_marker
+
+    if marker1.name == "python_version":
+        version_marker = marker1
+        full_version_marker = marker2
+    else:
+        version_marker = marker2
+        full_version_marker = marker1
+
+    normalized_constraint = get_python_constraint_from_marker(version_marker)
+    normalized_marker = SingleMarker("python_full_version", normalized_constraint)
+    merged_marker = _merge_single_markers(
+        normalized_marker, full_version_marker, merge_class
+    )
+    if merged_marker == normalized_marker:
+        # prefer original marker to avoid unnecessary changes
+        return version_marker
+    if merged_marker and isinstance(merged_marker, SingleMarker):
+        # We have to fix markers like 'python_full_version == "3.6"'
+        # to receive 'python_full_version == "3.6.0"'.
+        # It seems a bit hacky to convert to string and back to marker,
+        # but it's probably much simpler than to consider the different constraint
+        # classes (mostly VersionRangeConstraint, but VersionUnion for "!=") and
+        # since this conversion is only required for python_full_version markers
+        # it may be sufficient to handle it here.
+ marker_string = str(merged_marker) + precision = marker_string.count(".") + 1 + if precision < 3: + marker_string = marker_string[:-1] + ".0" * (3 - precision) + '"' + merged_marker = parse_marker(marker_string) + return merged_marker diff --git a/src/poetry/core/version/parser.py b/src/poetry/core/version/parser.py new file mode 100644 index 0000000..085cfa3 --- /dev/null +++ b/src/poetry/core/version/parser.py @@ -0,0 +1,31 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import Any + + +if TYPE_CHECKING: + from pathlib import Path + + from lark import Lark + from lark import Tree + + +class Parser: + def __init__( + self, grammar: Path, parser: str = "lalr", debug: bool = False + ) -> None: + self._grammar = grammar + self._parser = parser + self._debug = debug + self._lark: Lark | None = None + + def parse(self, text: str, **kwargs: Any) -> Tree: + from lark import Lark + + if self._lark is None: + self._lark = Lark.open( + grammar_filename=self._grammar, parser=self._parser, debug=self._debug + ) + + return self._lark.parse(text=text, **kwargs) diff --git a/src/poetry/core/version/pep440/__init__.py b/src/poetry/core/version/pep440/__init__.py new file mode 100644 index 0000000..7c7b3f8 --- /dev/null +++ b/src/poetry/core/version/pep440/__init__.py @@ -0,0 +1,9 @@ +from __future__ import annotations + +from poetry.core.version.pep440.segments import LocalSegmentType +from poetry.core.version.pep440.segments import Release +from poetry.core.version.pep440.segments import ReleaseTag +from poetry.core.version.pep440.version import PEP440Version + + +__all__ = ["LocalSegmentType", "Release", "ReleaseTag", "PEP440Version"] diff --git a/src/poetry/core/version/pep440/parser.py b/src/poetry/core/version/pep440/parser.py new file mode 100644 index 0000000..11fae07 --- /dev/null +++ b/src/poetry/core/version/pep440/parser.py @@ -0,0 +1,83 @@ +from __future__ import annotations + +import re + +from typing import TYPE_CHECKING +from typing import Match +from typing import TypeVar + +from packaging.version import VERSION_PATTERN + +from poetry.core.version.exceptions import InvalidVersion +from poetry.core.version.pep440 import Release +from poetry.core.version.pep440 import ReleaseTag + + +if TYPE_CHECKING: + from poetry.core.version.pep440 import LocalSegmentType + from poetry.core.version.pep440.version import PEP440Version + +T = TypeVar("T", bound="PEP440Version") + + +class PEP440Parser: + _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE) + _local_version_separators = re.compile(r"[._-]") + + @classmethod + def _get_release(cls, match: Match[str] | None) -> Release: + if not match or match.group("release") is None: + return Release(0) + return Release.from_parts(*(int(i) for i in match.group("release").split("."))) + + @classmethod + def _get_prerelease(cls, match: Match[str] | None) -> ReleaseTag | None: + if not match or match.group("pre") is None: + return None + return ReleaseTag(match.group("pre_l"), int(match.group("pre_n") or 0)) + + @classmethod + def _get_postrelease(cls, match: Match[str] | None) -> ReleaseTag | None: + if not match or match.group("post") is None: + return None + + return ReleaseTag( + match.group("post_l") or "post", + int(match.group("post_n1") or match.group("post_n2") or 0), + ) + + @classmethod + def _get_devrelease(cls, match: Match[str] | None) -> ReleaseTag | None: + if not match or match.group("dev") is None: + return None + return ReleaseTag(match.group("dev_l"), 
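+            # the dev number defaults to 0 when it is omitted (e.g. '1.0.dev')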
int(match.group("dev_n") or 0)) + + @classmethod + def _get_local(cls, match: Match[str] | None) -> LocalSegmentType | None: + if not match or match.group("local") is None: + return None + + return tuple( + part.lower() + for part in cls._local_version_separators.split(match.group("local")) + ) + + @classmethod + def parse(cls, value: str, version_class: type[T]) -> T: + match = cls._regex.search(value) if value else None + if not match: + raise InvalidVersion(f"Invalid PEP 440 version: '{value}'") + + return version_class( + epoch=int(match.group("epoch")) if match.group("epoch") else 0, + release=cls._get_release(match), + pre=cls._get_prerelease(match), + post=cls._get_postrelease(match), + dev=cls._get_devrelease(match), + local=cls._get_local(match), + text=value, + ) + + +def parse_pep440(value: str, version_class: type[T]) -> T: + return PEP440Parser.parse(value, version_class) diff --git a/src/poetry/core/version/pep440/segments.py b/src/poetry/core/version/pep440/segments.py new file mode 100644 index 0000000..735f523 --- /dev/null +++ b/src/poetry/core/version/pep440/segments.py @@ -0,0 +1,146 @@ +from __future__ import annotations + +import dataclasses + +from typing import Optional +from typing import Tuple +from typing import Union + + +# Release phase IDs according to PEP440 +RELEASE_PHASE_ID_ALPHA = "a" +RELEASE_PHASE_ID_BETA = "b" +RELEASE_PHASE_ID_RC = "rc" +RELEASE_PHASE_ID_POST = "post" +RELEASE_PHASE_ID_DEV = "dev" + +RELEASE_PHASE_SPELLINGS = { + RELEASE_PHASE_ID_ALPHA: {RELEASE_PHASE_ID_ALPHA, "alpha"}, + RELEASE_PHASE_ID_BETA: {RELEASE_PHASE_ID_BETA, "beta"}, + RELEASE_PHASE_ID_RC: {RELEASE_PHASE_ID_RC, "c", "pre", "preview"}, + RELEASE_PHASE_ID_POST: {RELEASE_PHASE_ID_POST, "r", "rev", "-"}, + RELEASE_PHASE_ID_DEV: {RELEASE_PHASE_ID_DEV}, +} +RELEASE_PHASE_NORMALIZATIONS = { + s: id_ for id_, spellings in RELEASE_PHASE_SPELLINGS.items() for s in spellings +} + + +@dataclasses.dataclass(frozen=True, eq=True, order=True) +class Release: + major: int = dataclasses.field(default=0, compare=False) + minor: int | None = dataclasses.field(default=None, compare=False) + patch: int | None = dataclasses.field(default=None, compare=False) + # some projects use non-semver versioning schemes, eg: 1.2.3.4 + extra: tuple[int, ...] = dataclasses.field(default=(), compare=False) + precision: int = dataclasses.field(init=False, compare=False) + text: str = dataclasses.field(init=False, compare=False) + _compare_key: tuple[int, ...] 
= dataclasses.field(init=False, compare=True) + + def __post_init__(self) -> None: + if self.extra: + if self.minor is None: + object.__setattr__(self, "minor", 0) + if self.patch is None: + object.__setattr__(self, "patch", 0) + parts = [ + str(part) + for part in [self.major, self.minor, self.patch, *self.extra] + if part is not None + ] + object.__setattr__(self, "text", ".".join(parts)) + object.__setattr__(self, "precision", len(parts)) + object.__setattr__( + self, + "_compare_key", + (self.major, self.minor or 0, self.patch or 0, *self.extra), + ) + + @classmethod + def from_parts(cls, *parts: int) -> Release: + if not parts: + return cls() + + return cls( + major=parts[0], + minor=parts[1] if len(parts) > 1 else None, + patch=parts[2] if len(parts) > 2 else None, + extra=parts[3:], + ) + + def to_string(self) -> str: + return self.text + + def next_major(self) -> Release: + return dataclasses.replace( + self, + major=self.major + 1, + minor=0 if self.minor is not None else None, + patch=0 if self.patch is not None else None, + extra=tuple(0 for _ in self.extra), + ) + + def next_minor(self) -> Release: + return dataclasses.replace( + self, + major=self.major, + minor=self.minor + 1 if self.minor is not None else 1, + patch=0 if self.patch is not None else None, + extra=tuple(0 for _ in self.extra), + ) + + def next_patch(self) -> Release: + return dataclasses.replace( + self, + major=self.major, + minor=self.minor if self.minor is not None else 0, + patch=self.patch + 1 if self.patch is not None else 1, + extra=tuple(0 for _ in self.extra), + ) + + +@dataclasses.dataclass(frozen=True, eq=True, order=True) +class ReleaseTag: + phase: str + number: int = dataclasses.field(default=0) + + def __post_init__(self) -> None: + object.__setattr__( + self, "phase", RELEASE_PHASE_NORMALIZATIONS.get(self.phase, self.phase) + ) + + def to_string(self, short: bool = False) -> str: + if short: + import warnings + + warnings.warn( + "Parameter 'short' has no effect and will be removed. 
" + "(Release tags are always normalized according to PEP 440 now.)", + DeprecationWarning, + stacklevel=2, + ) + + return f"{self.phase}{self.number}" + + def next(self) -> ReleaseTag: + return dataclasses.replace(self, phase=self.phase, number=self.number + 1) + + def next_phase(self) -> ReleaseTag | None: + if self.phase in [ + RELEASE_PHASE_ID_POST, + RELEASE_PHASE_ID_RC, + RELEASE_PHASE_ID_DEV, + ]: + return None + + if self.phase == RELEASE_PHASE_ID_ALPHA: + _phase = RELEASE_PHASE_ID_BETA + elif self.phase == RELEASE_PHASE_ID_BETA: + _phase = RELEASE_PHASE_ID_RC + else: + return None + + return self.__class__(phase=_phase, number=0) + + +LocalSegmentType = Optional[Union[str, int, Tuple[Union[str, int], ...]]] diff --git a/src/poetry/core/version/pep440/version.py b/src/poetry/core/version/pep440/version.py new file mode 100644 index 0000000..eeae009 --- /dev/null +++ b/src/poetry/core/version/pep440/version.py @@ -0,0 +1,317 @@ +from __future__ import annotations + +import dataclasses +import functools +import warnings + +from typing import TYPE_CHECKING +from typing import Any +from typing import TypeVar + +from poetry.core.version.pep440.segments import RELEASE_PHASE_ID_ALPHA +from poetry.core.version.pep440.segments import RELEASE_PHASE_ID_DEV +from poetry.core.version.pep440.segments import RELEASE_PHASE_ID_POST +from poetry.core.version.pep440.segments import Release +from poetry.core.version.pep440.segments import ReleaseTag + + +if TYPE_CHECKING: + from poetry.core.version.pep440.segments import LocalSegmentType + + +@functools.total_ordering +class AlwaysSmaller: + def __lt__(self, other: object) -> bool: + return True + + +@functools.total_ordering +class AlwaysGreater: + def __gt__(self, other: object) -> bool: + return True + + +class Infinity(AlwaysGreater, int): + pass + + +class NegativeInfinity(AlwaysSmaller, int): + pass + + +T = TypeVar("T", bound="PEP440Version") + +# we use the phase "z" to ensure we always sort this after other phases +_INF_TAG = ReleaseTag("z", Infinity()) +# we use the phase "" to ensure we always sort this before other phases +_NEG_INF_TAG = ReleaseTag("", NegativeInfinity()) + + +@dataclasses.dataclass(frozen=True, eq=True, order=True) +class PEP440Version: + epoch: int = dataclasses.field(default=0, compare=False) + release: Release = dataclasses.field(default_factory=Release, compare=False) + pre: ReleaseTag | None = dataclasses.field(default=None, compare=False) + post: ReleaseTag | None = dataclasses.field(default=None, compare=False) + dev: ReleaseTag | None = dataclasses.field(default=None, compare=False) + local: LocalSegmentType = dataclasses.field(default=None, compare=False) + text: str = dataclasses.field(default="", compare=False) + _compare_key: tuple[ + int, Release, ReleaseTag, ReleaseTag, ReleaseTag, tuple[int | str, ...] 
+ ] = dataclasses.field(init=False, compare=True) + + def __post_init__(self) -> None: + if self.local is not None and not isinstance(self.local, tuple): + object.__setattr__(self, "local", (self.local,)) + + if isinstance(self.release, tuple): + object.__setattr__(self, "release", Release(*self.release)) + + # we do this here to handle both None and tomlkit string values + object.__setattr__( + self, "text", self.to_string() if not self.text else str(self.text) + ) + + object.__setattr__(self, "_compare_key", self._make_compare_key()) + + def _make_compare_key( + self, + ) -> tuple[ + int, + Release, + ReleaseTag, + ReleaseTag, + ReleaseTag, + tuple[tuple[int, int | str], ...], + ]: + """ + This code is based on the implementation of packaging.version._cmpkey(..) + """ + # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0. + # We'll do this by abusing the pre segment, but we _only_ want to do this + # if there is not a pre or a post segment. If we have one of those then + # the normal sorting rules will handle this case correctly. + if self.pre is None and self.post is None and self.dev is not None: + _pre = _NEG_INF_TAG + # Versions without a pre-release (except as noted above) should sort after + # those with one. + elif self.pre is None: + _pre = _INF_TAG + else: + _pre = self.pre + + # Versions without a post segment should sort before those with one. + _post = _NEG_INF_TAG if self.post is None else self.post + + # Versions without a development segment should sort after those with one. + _dev = _INF_TAG if self.dev is None else self.dev + + _local: tuple[tuple[int, int | str], ...] + if self.local is None: + # Versions without a local segment should sort before those with one. + _local = ((NegativeInfinity(), ""),) + else: + # Versions with a local segment need that segment parsed to implement + # the sorting rules in PEP440. + # - Alpha numeric segments sort before numeric segments + # - Alpha numeric segments sort lexicographically + # - Numeric segments sort numerically + # - Shorter versions sort before longer versions when the prefixes + # match exactly + assert isinstance(self.local, tuple) + _local = tuple( + # We typecast strings that are integers so that they can be compared + (int(i), "") if str(i).isnumeric() else (NegativeInfinity(), i) + for i in self.local + ) + return self.epoch, self.release, _pre, _post, _dev, _local + + @property + def major(self) -> int: + return self.release.major + + @property + def minor(self) -> int | None: + return self.release.minor + + @property + def patch(self) -> int | None: + return self.release.patch + + @property + def non_semver_parts(self) -> tuple[int, ...]: + assert isinstance(self.release.extra, tuple) + return self.release.extra + + def to_string(self, short: bool = False) -> str: + if short: + import warnings + + warnings.warn( + "Parameter 'short' has no effect and will be removed. 
" + "(Versions are always normalized according to PEP 440 now.)", + DeprecationWarning, + stacklevel=2, + ) + + version_string = self.release.to_string() + + if self.epoch: + # if epoch is non-zero we should include it + version_string = f"{self.epoch}!{version_string}" + + if self.pre: + version_string += self.pre.to_string() + + if self.post: + version_string = f"{version_string}.{self.post.to_string()}" + + if self.dev: + version_string = f"{version_string}.{self.dev.to_string()}" + + if self.local: + assert isinstance(self.local, tuple) + version_string += "+" + ".".join(map(str, self.local)) + + return version_string.lower() + + @classmethod + def parse(cls: type[T], value: str) -> T: + from poetry.core.version.pep440.parser import parse_pep440 + + return parse_pep440(value, cls) + + def is_prerelease(self) -> bool: + return self.pre is not None + + def is_postrelease(self) -> bool: + return self.post is not None + + def is_devrelease(self) -> bool: + return self.dev is not None + + def is_local(self) -> bool: + return self.local is not None + + def is_no_suffix_release(self) -> bool: + return not (self.pre or self.post or self.dev) + + def is_unstable(self) -> bool: + return self.is_prerelease() or self.is_devrelease() + + def is_stable(self) -> bool: + return not self.is_unstable() + + def _is_increment_required(self) -> bool: + return self.is_stable() or (not self.is_prerelease() and self.is_postrelease()) + + def next_major(self: T) -> T: + release = self.release + if self._is_increment_required() or Release(release.major, 0, 0) < release: + release = release.next_major() + return self.__class__(epoch=self.epoch, release=release) + + def next_minor(self: T) -> T: + release = self.release + if ( + self._is_increment_required() + or Release(release.major, release.minor, 0) < release + ): + release = release.next_minor() + return self.__class__(epoch=self.epoch, release=release) + + def next_patch(self: T) -> T: + release = self.release + if ( + self._is_increment_required() + or Release(release.major, release.minor, release.patch) < release + ): + release = release.next_patch() + return self.__class__(epoch=self.epoch, release=release) + + def next_prerelease(self: T, next_phase: bool = False) -> PEP440Version: + if self.is_stable(): + warnings.warn( + "Calling next_prerelease() on a stable release is deprecated for its" + " ambiguity. Use next_major(), next_minor(), etc. together with" + " first_prerelease()", + DeprecationWarning, + stacklevel=2, + ) + if self.is_prerelease(): + assert self.pre is not None + if not self.is_devrelease() or self.is_postrelease(): + pre = self.pre.next_phase() if next_phase else self.pre.next() + else: + pre = self.pre + else: + pre = ReleaseTag(RELEASE_PHASE_ID_ALPHA) + return self.__class__(epoch=self.epoch, release=self.release, pre=pre) + + def next_postrelease(self: T) -> T: + if self.is_postrelease(): + assert self.post is not None + post = self.post.next() if self.dev is None else self.post + else: + post = ReleaseTag(RELEASE_PHASE_ID_POST) + return self.__class__( + epoch=self.epoch, + release=self.release, + pre=self.pre, + post=post, + ) + + def next_devrelease(self: T) -> T: + if self.is_devrelease(): + assert self.dev is not None + dev = self.dev.next() + else: + warnings.warn( + "Calling next_devrelease() on a non dev release is deprecated for its" + " ambiguity. Use next_major(), next_minor(), etc. 
together with" + " first_devrelease()", + DeprecationWarning, + stacklevel=2, + ) + dev = ReleaseTag(RELEASE_PHASE_ID_DEV) + return self.__class__( + epoch=self.epoch, + release=self.release, + pre=self.pre, + post=self.post, + dev=dev, + ) + + def first_prerelease(self: T) -> T: + return self.__class__( + epoch=self.epoch, + release=self.release, + pre=ReleaseTag(RELEASE_PHASE_ID_ALPHA), + ) + + def first_devrelease(self: T) -> T: + return self.__class__( + epoch=self.epoch, + release=self.release, + pre=self.pre, + post=self.post, + dev=ReleaseTag(RELEASE_PHASE_ID_DEV), + ) + + def replace(self: T, **kwargs: Any) -> T: + return self.__class__( + **{ + **{ + k: getattr(self, k) + for k in self.__dataclass_fields__.keys() + if k not in ("_compare_key", "text") + }, # setup defaults with current values, excluding compare keys and text + **kwargs, # keys to replace + } + ) + + def without_local(self: T) -> T: + return self.replace(local=None) + + def without_postrelease(self: T) -> T: + return self.replace(post=None) diff --git a/src/poetry/core/version/requirements.py b/src/poetry/core/version/requirements.py new file mode 100644 index 0000000..cadc28f --- /dev/null +++ b/src/poetry/core/version/requirements.py @@ -0,0 +1,108 @@ +from __future__ import annotations + +import urllib.parse as urlparse + +from poetry.core.constraints.version import parse_constraint +from poetry.core.constraints.version.exceptions import ParseConstraintError +from poetry.core.version.grammars import GRAMMAR_PEP_508_CONSTRAINTS +from poetry.core.version.markers import _compact_markers +from poetry.core.version.parser import Parser + + +class InvalidRequirement(ValueError): + """ + An invalid requirement was found, users should refer to PEP 508. + """ + + +# Parser: PEP 508 Constraints +_parser = Parser(GRAMMAR_PEP_508_CONSTRAINTS, "lalr") + + +class Requirement: + """ + Parse a requirement. + + Parse a given requirement string into its parts, such as name, specifier, + URL, and extras. Raises InvalidRequirement on a badly-formed requirement + string. 
+ """ + + def __init__(self, requirement_string: str) -> None: + from lark import UnexpectedCharacters + from lark import UnexpectedToken + + try: + parsed = _parser.parse(requirement_string) + except (UnexpectedCharacters, UnexpectedToken) as e: + raise InvalidRequirement( + "The requirement is invalid: Unexpected character at column" + f" {e.column}\n\n{e.get_context(requirement_string)}" + ) + + self.name: str = next(parsed.scan_values(lambda t: t.type == "NAME")).value + url = next(parsed.scan_values(lambda t: t.type == "URI"), None) + + if url: + url = url.value + parsed_url = urlparse.urlparse(url) + if parsed_url.scheme == "file": + if urlparse.urlunparse(parsed_url) != url: + raise InvalidRequirement( + f'The requirement is invalid: invalid URL "{url}"' + ) + elif ( + not (parsed_url.scheme and parsed_url.netloc) + or (not parsed_url.scheme and not parsed_url.netloc) + ) and not parsed_url.path: + raise InvalidRequirement( + f'The requirement is invalid: invalid URL "{url}"' + ) + self.url = url + else: + self.url = None + + self.extras = [e.value for e in parsed.scan_values(lambda t: t.type == "EXTRA")] + constraint = next(parsed.find_data("version_specification"), None) + if not constraint: + constraint = "*" + else: + constraint = ",".join(constraint.children) + + try: + self.constraint = parse_constraint(constraint) + except ParseConstraintError: + raise InvalidRequirement( + f'The requirement is invalid: invalid version constraint "{constraint}"' + ) + + self.pretty_constraint = constraint + + marker = next(parsed.find_data("marker_spec"), None) + if marker: + marker = _compact_markers( + marker.children[0].children, tree_prefix="markers__" + ) + + self.marker = marker + + def __str__(self) -> str: + parts = [self.name] + + if self.extras: + extras = ",".join(sorted(self.extras)) + parts.append(f"[{extras}]") + + if self.pretty_constraint: + parts.append(self.pretty_constraint) + + if self.url: + parts.append(f"@ {self.url}") + + if self.marker: + parts.append(f"; {self.marker}") + + return "".join(parts) + + def __repr__(self) -> str: + return f"" diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 0000000..30e87fc --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,110 @@ +from __future__ import annotations + +import sys +import tempfile + +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Callable +from typing import Iterator + +import pytest +import virtualenv + +from poetry.core.factory import Factory +from poetry.core.utils._compat import WINDOWS + + +if TYPE_CHECKING: + from _pytest.config import Config + from _pytest.config.argparsing import Parser + + +def pytest_addoption(parser: Parser) -> None: + parser.addoption( + "--integration", + action="store_true", + dest="integration", + default=False, + help="enable integration tests", + ) + + +def pytest_configure(config: Config) -> None: + config.addinivalue_line("markers", "integration: mark integration tests") + + if not config.option.integration: + config.option.markexpr = "not integration" + + +def get_project_from_dir(base_directory: Path) -> Callable[[str], Path]: + def get(name: str) -> Path: + path = base_directory / name + if not path.exists(): + raise FileNotFoundError(str(path)) + return path + + return get + + +@pytest.fixture(scope="session") +def project_source_root() -> Path: + return Path(__file__).parent.parent + + +@pytest.fixture(scope="session") +def 
project_source_test_root() -> Path: + return Path(__file__).parent + + +@pytest.fixture(scope="session") +def common_fixtures_directory(project_source_test_root: Path) -> Path: + return project_source_test_root / "fixtures" + + +@pytest.fixture(scope="session") +def common_project(common_fixtures_directory: Path) -> Callable[[str], Path]: + return get_project_from_dir(common_fixtures_directory) + + +@pytest.fixture(scope="session") +def masonry_fixtures_directory(project_source_test_root: Path) -> Path: + return project_source_test_root / "masonry" / "builders" / "fixtures" + + +@pytest.fixture(scope="session") +def masonry_project( + masonry_fixtures_directory: Path, +) -> Callable[[str], Path]: + return get_project_from_dir(masonry_fixtures_directory) + + +@pytest.fixture +def temporary_directory() -> Iterator[Path]: + with tempfile.TemporaryDirectory(prefix="poetry-core") as tmp: + yield Path(tmp) + + +@pytest.fixture +def venv(temporary_directory: Path) -> Path: + venv_dir = temporary_directory / ".venv" + virtualenv.cli_run( + [ + "--no-download", + "--no-periodic-update", + "--python", + sys.executable, + venv_dir.as_posix(), + ] + ) + return venv_dir + + +@pytest.fixture +def python(venv: Path) -> str: + return venv.joinpath("Scripts/Python.exe" if WINDOWS else "bin/python").as_posix() + + +@pytest.fixture() +def f() -> Factory: + return Factory() diff --git a/tests/constraints/__init__.py b/tests/constraints/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/constraints/generic/__init__.py b/tests/constraints/generic/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/constraints/generic/test_constraint.py b/tests/constraints/generic/test_constraint.py new file mode 100644 index 0000000..73a2a73 --- /dev/null +++ b/tests/constraints/generic/test_constraint.py @@ -0,0 +1,185 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest + +from poetry.core.constraints.generic import AnyConstraint +from poetry.core.constraints.generic import Constraint +from poetry.core.constraints.generic import EmptyConstraint +from poetry.core.constraints.generic import MultiConstraint +from poetry.core.constraints.generic import UnionConstraint + + +if TYPE_CHECKING: + from poetry.core.constraints.generic import BaseConstraint + + +def test_allows() -> None: + c = Constraint("win32") + + assert c.allows(Constraint("win32")) + assert not c.allows(Constraint("linux")) + + c = Constraint("win32", "!=") + + assert not c.allows(Constraint("win32")) + assert c.allows(Constraint("linux")) + + +def test_allows_any() -> None: + c = Constraint("win32") + + assert c.allows_any(Constraint("win32")) + assert not c.allows_any(Constraint("linux")) + assert c.allows_any(UnionConstraint(Constraint("win32"), Constraint("linux"))) + assert c.allows_any(Constraint("linux", "!=")) + + c = Constraint("win32", "!=") + + assert not c.allows_any(Constraint("win32")) + assert c.allows_any(Constraint("linux")) + assert c.allows_any(UnionConstraint(Constraint("win32"), Constraint("linux"))) + assert c.allows_any(Constraint("linux", "!=")) + + +def test_allows_all() -> None: + c = Constraint("win32") + + assert c.allows_all(Constraint("win32")) + assert not c.allows_all(Constraint("linux")) + assert not c.allows_all(Constraint("linux", "!=")) + assert not c.allows_all(UnionConstraint(Constraint("win32"), Constraint("linux"))) + + +@pytest.mark.parametrize( + ("constraint1", "constraint2", "expected"), + [ + ( + Constraint("win32"), + 
Constraint("win32"), + Constraint("win32"), + ), + ( + Constraint("win32"), + Constraint("linux"), + EmptyConstraint(), + ), + ( + Constraint("win32"), + UnionConstraint(Constraint("win32"), Constraint("linux")), + Constraint("win32"), + ), + ( + Constraint("win32"), + UnionConstraint(Constraint("linux"), Constraint("linux2")), + EmptyConstraint(), + ), + ( + Constraint("win32"), + Constraint("linux", "!="), + Constraint("win32"), + ), + ( + Constraint("win32", "!="), + Constraint("linux"), + Constraint("linux"), + ), + ( + Constraint("win32", "!="), + Constraint("linux", "!="), + MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")), + ), + ( + UnionConstraint(Constraint("win32"), Constraint("linux")), + UnionConstraint(Constraint("win32"), Constraint("darwin")), + Constraint("win32"), + ), + ( + UnionConstraint(Constraint("win32"), Constraint("linux")), + MultiConstraint(Constraint("win32", "!="), Constraint("darwin", "!=")), + Constraint("linux"), + ), + ], +) +def test_intersect( + constraint1: BaseConstraint, + constraint2: BaseConstraint, + expected: BaseConstraint, +) -> None: + intersection = constraint1.intersect(constraint2) + assert intersection == expected + + +@pytest.mark.parametrize( + ("constraint1", "constraint2", "expected"), + [ + ( + Constraint("win32"), + Constraint("win32"), + Constraint("win32"), + ), + ( + Constraint("win32"), + Constraint("linux"), + UnionConstraint(Constraint("win32"), Constraint("linux")), + ), + ( + Constraint("win32"), + UnionConstraint(Constraint("win32"), Constraint("linux")), + UnionConstraint(Constraint("win32"), Constraint("linux")), + ), + ( + Constraint("win32"), + UnionConstraint(Constraint("linux"), Constraint("linux2")), + UnionConstraint( + Constraint("win32"), Constraint("linux"), Constraint("linux2") + ), + ), + ( + Constraint("win32"), + Constraint("linux", "!="), + Constraint("linux", "!="), + ), + ( + Constraint("win32", "!="), + Constraint("linux"), + Constraint("win32", "!="), + ), + ( + Constraint("win32", "!="), + Constraint("linux", "!="), + AnyConstraint(), + ), + ], +) +def test_union( + constraint1: BaseConstraint, + constraint2: BaseConstraint, + expected: BaseConstraint, +) -> None: + union = constraint1.union(constraint2) + assert union == expected + + +def test_difference() -> None: + c = Constraint("win32") + + assert c.difference(Constraint("win32")).is_empty() + assert c.difference(Constraint("linux")) == c + + +@pytest.mark.parametrize( + "constraint", + [ + EmptyConstraint(), + AnyConstraint(), + Constraint("win32"), + UnionConstraint(Constraint("win32"), Constraint("linux")), + MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")), + ], +) +def test_constraints_are_hashable(constraint: BaseConstraint) -> None: + # We're just testing that constraints are hashable, there's nothing much to say + # about the result. 
+ hash(constraint) diff --git a/tests/constraints/generic/test_main.py b/tests/constraints/generic/test_main.py new file mode 100644 index 0000000..2707fd9 --- /dev/null +++ b/tests/constraints/generic/test_main.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +import pytest + +from poetry.core.constraints.generic import AnyConstraint +from poetry.core.constraints.generic import Constraint +from poetry.core.constraints.generic import MultiConstraint +from poetry.core.constraints.generic import UnionConstraint +from poetry.core.constraints.generic import parse_constraint + + +@pytest.mark.parametrize( + "input,constraint", + [ + ("*", AnyConstraint()), + ("win32", Constraint("win32", "=")), + ("=win32", Constraint("win32", "=")), + ("==win32", Constraint("win32", "=")), + ("!=win32", Constraint("win32", "!=")), + ("!= win32", Constraint("win32", "!=")), + ], +) +def test_parse_constraint(input: str, constraint: AnyConstraint | Constraint) -> None: + assert parse_constraint(input) == constraint + + +@pytest.mark.parametrize( + "input,constraint", + [ + ( + "!=win32,!=linux", + MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")), + ), + ( + "!=win32,!=linux,!=linux2", + MultiConstraint( + Constraint("win32", "!="), + Constraint("linux", "!="), + Constraint("linux2", "!="), + ), + ), + ], +) +def test_parse_constraint_multi(input: str, constraint: MultiConstraint) -> None: + assert parse_constraint(input) == constraint + + +@pytest.mark.parametrize( + "input,constraint", + [ + ("win32 || linux", UnionConstraint(Constraint("win32"), Constraint("linux"))), + ( + "win32 || !=linux2", + UnionConstraint(Constraint("win32"), Constraint("linux2", "!=")), + ), + ], +) +def test_parse_constraint_union(input: str, constraint: UnionConstraint) -> None: + assert parse_constraint(input) == constraint diff --git a/tests/constraints/generic/test_multi_constraint.py b/tests/constraints/generic/test_multi_constraint.py new file mode 100644 index 0000000..583305d --- /dev/null +++ b/tests/constraints/generic/test_multi_constraint.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +from poetry.core.constraints.generic import Constraint +from poetry.core.constraints.generic import MultiConstraint + + +def test_allows() -> None: + c = MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")) + + assert not c.allows(Constraint("win32")) + assert not c.allows(Constraint("linux")) + assert c.allows(Constraint("darwin")) + + +def test_allows_any() -> None: + c = MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")) + + assert c.allows_any(Constraint("darwin")) + assert c.allows_any(Constraint("darwin", "!=")) + assert not c.allows_any(Constraint("win32")) + assert c.allows_any(c) + assert c.allows_any( + MultiConstraint(Constraint("win32", "!="), Constraint("darwin", "!=")) + ) + + +def test_allows_all() -> None: + c = MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")) + + assert c.allows_all(Constraint("darwin")) + assert c.allows_all(Constraint("darwin", "!=")) + assert not c.allows_all(Constraint("win32")) + assert c.allows_all(c) + assert not c.allows_all( + MultiConstraint(Constraint("win32", "!="), Constraint("darwin", "!=")) + ) + + +def test_intersect() -> None: + c = MultiConstraint(Constraint("win32", "!="), Constraint("linux", "!=")) + + intersection = c.intersect(Constraint("win32", "!=")) + assert intersection == Constraint("win32", "!=") diff --git a/tests/constraints/generic/test_union_constraint.py 
b/tests/constraints/generic/test_union_constraint.py new file mode 100644 index 0000000..59d3a9d --- /dev/null +++ b/tests/constraints/generic/test_union_constraint.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +from poetry.core.constraints.generic import Constraint +from poetry.core.constraints.generic import UnionConstraint + + +def test_allows() -> None: + c = UnionConstraint(Constraint("win32"), Constraint("linux")) + + assert c.allows(Constraint("win32")) + assert c.allows(Constraint("linux")) + assert not c.allows(Constraint("darwin")) + + +def test_allows_any() -> None: + c = UnionConstraint(Constraint("win32"), Constraint("linux")) + + assert c.allows_any(c) + assert c.allows_any(UnionConstraint(Constraint("win32"), Constraint("darwin"))) + assert not c.allows_any(UnionConstraint(Constraint("linux2"), Constraint("darwin"))) + assert c.allows_any(Constraint("win32")) + assert not c.allows_any(Constraint("darwin")) + + +def test_allows_all() -> None: + c = UnionConstraint(Constraint("win32"), Constraint("linux")) + + assert c.allows_all(c) + assert not c.allows_all(UnionConstraint(Constraint("win32"), Constraint("darwin"))) + assert not c.allows_all(UnionConstraint(Constraint("linux2"), Constraint("darwin"))) + assert c.allows_all(Constraint("win32")) + assert not c.allows_all(Constraint("darwin")) diff --git a/tests/constraints/version/__init__.py b/tests/constraints/version/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/constraints/version/test_helpers.py b/tests/constraints/version/test_helpers.py new file mode 100644 index 0000000..a0f7962 --- /dev/null +++ b/tests/constraints/version/test_helpers.py @@ -0,0 +1,452 @@ +from __future__ import annotations + +import pytest + +from poetry.core.constraints.version import Version +from poetry.core.constraints.version import VersionRange +from poetry.core.constraints.version import VersionUnion +from poetry.core.constraints.version import parse_constraint +from poetry.core.version.pep440 import ReleaseTag + + +@pytest.mark.parametrize( + "input,constraint", + [ + ("*", VersionRange()), + ("*.*", VersionRange()), + ("v*.*", VersionRange()), + ("*.x.*", VersionRange()), + ("x.X.x.*", VersionRange()), + (">1.0.0", VersionRange(min=Version.from_parts(1, 0, 0))), + ("<1.2.3", VersionRange(max=Version.from_parts(1, 2, 3))), + ("<=1.2.3", VersionRange(max=Version.from_parts(1, 2, 3), include_max=True)), + (">=1.2.3", VersionRange(min=Version.from_parts(1, 2, 3), include_min=True)), + ("=1.2.3", Version.from_parts(1, 2, 3)), + ("1.2.3", Version.from_parts(1, 2, 3)), + ("1!2.3.4", Version.from_parts(2, 3, 4, epoch=1)), + ("=1.0", Version.from_parts(1, 0, 0)), + ("1.2.3b5", Version.from_parts(1, 2, 3, pre=ReleaseTag("beta", 5))), + (">= 1.2.3", VersionRange(min=Version.from_parts(1, 2, 3), include_min=True)), + ( + ">dev", + VersionRange(min=Version.from_parts(0, 0, dev=ReleaseTag("dev"))), + ), # Issue 206 + ], +) +def test_parse_constraint(input: str, constraint: Version | VersionRange) -> None: + assert parse_constraint(input) == constraint + + +@pytest.mark.parametrize( + "input,constraint", + [ + ( + "v2.*", + VersionRange( + Version.from_parts(2, 0, 0), Version.from_parts(3, 0, 0), True + ), + ), + ( + "2.*.*", + VersionRange( + Version.from_parts(2, 0, 0), Version.from_parts(3, 0, 0), True + ), + ), + ( + "20.*", + VersionRange( + Version.from_parts(20, 0, 0), Version.from_parts(21, 0, 0), True + ), + ), + ( + "20.*.*", + VersionRange( + Version.from_parts(20, 0, 0), Version.from_parts(21, 0, 0), 
True + ), + ), + ( + "2.0.*", + VersionRange( + Version.from_parts(2, 0, 0), Version.from_parts(2, 1, 0), True + ), + ), + ( + "2.x", + VersionRange( + Version.from_parts(2, 0, 0), Version.from_parts(3, 0, 0), True + ), + ), + ( + "2.x.x", + VersionRange( + Version.from_parts(2, 0, 0), Version.from_parts(3, 0, 0), True + ), + ), + ( + "2.2.X", + VersionRange( + Version.from_parts(2, 2, 0), Version.from_parts(2, 3, 0), True + ), + ), + ("0.*", VersionRange(max=Version.from_parts(1, 0, 0))), + ("0.*.*", VersionRange(max=Version.from_parts(1, 0, 0))), + ("0.x", VersionRange(max=Version.from_parts(1, 0, 0))), + ], +) +def test_parse_constraint_wildcard(input: str, constraint: VersionRange) -> None: + assert parse_constraint(input) == constraint + + +@pytest.mark.parametrize( + "input,constraint", + [ + ( + "~v1", + VersionRange( + Version.from_parts(1, 0, 0), Version.from_parts(2, 0, 0), True + ), + ), + ( + "~1.0", + VersionRange( + Version.from_parts(1, 0, 0), Version.from_parts(1, 1, 0), True + ), + ), + ( + "~1.0.0", + VersionRange( + Version.from_parts(1, 0, 0), Version.from_parts(1, 1, 0), True + ), + ), + ( + "~1.2", + VersionRange( + Version.from_parts(1, 2, 0), Version.from_parts(1, 3, 0), True + ), + ), + ( + "~1.2.3", + VersionRange( + Version.from_parts(1, 2, 3), Version.from_parts(1, 3, 0), True + ), + ), + ( + "~1.2-beta", + VersionRange( + Version.from_parts(1, 2, 0, pre=ReleaseTag("beta")), + Version.from_parts(1, 3, 0), + True, + ), + ), + ( + "~1.2-b2", + VersionRange( + Version.from_parts(1, 2, 0, pre=ReleaseTag("beta", 2)), + Version.from_parts(1, 3, 0), + True, + ), + ), + ( + "~0.3", + VersionRange( + Version.from_parts(0, 3, 0), Version.from_parts(0, 4, 0), True + ), + ), + ( + "~3.5", + VersionRange( + Version.from_parts(3, 5, 0), Version.from_parts(3, 6, 0), True + ), + ), + ( + "~=3.5", + VersionRange( + Version.from_parts(3, 5, 0), Version.from_parts(4, 0, 0), True + ), + ), # PEP 440 + ( + "~=3.5.3", + VersionRange( + Version.from_parts(3, 5, 3), Version.from_parts(3, 6, 0), True + ), + ), # PEP 440 + ( + "~=3.5.3rc1", + VersionRange( + Version.from_parts(3, 5, 3, pre=ReleaseTag("rc", 1)), + Version.from_parts(3, 6, 0), + True, + ), + ), # PEP 440 + ], +) +def test_parse_constraint_tilde(input: str, constraint: VersionRange) -> None: + assert parse_constraint(input) == constraint + + +@pytest.mark.parametrize( + "input,constraint", + [ + ( + "^v1", + VersionRange( + Version.from_parts(1, 0, 0), Version.from_parts(2, 0, 0), True + ), + ), + ("^0", VersionRange(Version.from_parts(0), Version.from_parts(1), True)), + ( + "^0.0", + VersionRange( + Version.from_parts(0, 0, 0), Version.from_parts(0, 1, 0), True + ), + ), + ( + "^1.2", + VersionRange( + Version.from_parts(1, 2, 0), Version.from_parts(2, 0, 0), True + ), + ), + ( + "^1.2.3-beta.2", + VersionRange( + Version.from_parts(1, 2, 3, pre=ReleaseTag("beta", 2)), + Version.from_parts(2, 0, 0), + True, + ), + ), + ( + "^1.2.3", + VersionRange( + Version.from_parts(1, 2, 3), Version.from_parts(2, 0, 0), True + ), + ), + ( + "^0.2.3", + VersionRange( + Version.from_parts(0, 2, 3), Version.from_parts(0, 3, 0), True + ), + ), + ( + "^0.2", + VersionRange( + Version.from_parts(0, 2, 0), Version.from_parts(0, 3, 0), True + ), + ), + ( + "^0.2.0", + VersionRange( + Version.from_parts(0, 2, 0), Version.from_parts(0, 3, 0), True + ), + ), + ( + "^0.0.3", + VersionRange( + Version.from_parts(0, 0, 3), Version.from_parts(0, 0, 4), True + ), + ), + ( + "^0.0.3-alpha.21", + VersionRange( + Version.from_parts(0, 0, 3, 
pre=ReleaseTag("alpha", 21)), + Version.from_parts(0, 0, 4), + True, + ), + ), + ( + "^0.1.3-alpha.21", + VersionRange( + Version.from_parts(0, 1, 3, pre=ReleaseTag("alpha", 21)), + Version.from_parts(0, 2, 0), + True, + ), + ), + ( + "^0.0.0-alpha.21", + VersionRange( + Version.from_parts(0, 0, 0, pre=ReleaseTag("alpha", 21)), + Version.from_parts(0, 0, 1), + True, + ), + ), + ], +) +def test_parse_constraint_caret(input: str, constraint: VersionRange) -> None: + assert parse_constraint(input) == constraint + + +@pytest.mark.parametrize( + "input", + [ + ">2.0,<=3.0", + ">2.0 <=3.0", + ">2.0 <=3.0", + ">2.0, <=3.0", + ">2.0 ,<=3.0", + ">2.0 , <=3.0", + ">2.0 , <=3.0", + "> 2.0 <= 3.0", + "> 2.0 , <= 3.0", + " > 2.0 , <= 3.0 ", + ], +) +def test_parse_constraint_multi(input: str) -> None: + assert parse_constraint(input) == VersionRange( + Version.from_parts(2, 0, 0), + Version.from_parts(3, 0, 0), + include_min=False, + include_max=True, + ) + + +@pytest.mark.parametrize( + "input, output", + [ + ( + ">1!2,<=2!3", + VersionRange( + Version.from_parts(2, 0, 0, epoch=1), + Version.from_parts(3, 0, 0, epoch=2), + include_min=False, + include_max=True, + ), + ), + ( + ">=1!2,<2!3", + VersionRange( + Version.from_parts(2, 0, 0, epoch=1), + Version.from_parts(3, 0, 0, epoch=2), + include_min=True, + include_max=False, + ), + ), + ], +) +def test_parse_constraint_multi_with_epochs(input: str, output: VersionRange) -> None: + assert parse_constraint(input) == output + + +@pytest.mark.parametrize( + "input", + [">=2.7,!=3.0.*,!=3.1.*", ">=2.7, !=3.0.*, !=3.1.*", ">= 2.7, != 3.0.*, != 3.1.*"], +) +def test_parse_constraint_multi_wilcard(input: str) -> None: + assert parse_constraint(input) == VersionUnion( + VersionRange( + Version.from_parts(2, 7, 0), Version.from_parts(3, 0, 0), True, False + ), + VersionRange(Version.from_parts(3, 2, 0), None, True, False), + ) + + +@pytest.mark.parametrize( + "input,constraint", + [ + ( + "!=v2.*", + VersionRange(max=Version.parse("2.0")).union( + VersionRange(Version.parse("3.0"), include_min=True) + ), + ), + ( + "!=2.*.*", + VersionRange(max=Version.parse("2.0")).union( + VersionRange(Version.parse("3.0"), include_min=True) + ), + ), + ( + "!=2.0.*", + VersionRange(max=Version.parse("2.0")).union( + VersionRange(Version.parse("2.1"), include_min=True) + ), + ), + ("!=0.*", VersionRange(Version.parse("1.0"), include_min=True)), + ("!=0.*.*", VersionRange(Version.parse("1.0"), include_min=True)), + ], +) +def test_parse_constraints_negative_wildcard( + input: str, constraint: VersionRange +) -> None: + assert parse_constraint(input) == constraint + + +@pytest.mark.parametrize( + "input,constraint", + [ + (">3.7,", VersionRange(min=Version.parse("3.7"))), + (">3.7 , ", VersionRange(min=Version.parse("3.7"))), + ( + ">3.7,<3.8,", + VersionRange(min=Version.parse("3.7"), max=Version.parse("3.8")), + ), + ( + ">3.7,||<3.6,", + VersionRange(min=Version.parse("3.7")).union( + VersionRange(max=Version.parse("3.6")) + ), + ), + ( + ">3.7 , || <3.6 , ", + VersionRange(min=Version.parse("3.7")).union( + VersionRange(max=Version.parse("3.6")) + ), + ), + ( + ">3.7, <3.8, || <3.6, >3.5", + VersionRange(min=Version.parse("3.7"), max=Version.parse("3.8")).union( + VersionRange(min=Version.parse("3.5"), max=Version.parse("3.6")) + ), + ), + ], +) +def test_parse_constraints_with_trailing_comma( + input: str, constraint: VersionRange +) -> None: + assert parse_constraint(input) == constraint + + +@pytest.mark.parametrize( + "input, expected", + [ + ("1", "1"), + ("1.2", 
"1.2"), + ("1.2.3", "1.2.3"), + ("!=1", "!=1"), + ("!=1.2", "!=1.2"), + ("!=1.2.3", "!=1.2.3"), + ("^1", ">=1,<2"), + ("^1.0", ">=1.0,<2.0"), + ("^1.0.0", ">=1.0.0,<2.0.0"), + ("^1.0.0-alpha.1", ">=1.0.0-alpha.1,<2.0.0"), + ("^0", ">=0,<1"), + ("^0.1", ">=0.1,<0.2"), + ("^0.0.2", ">=0.0.2,<0.0.3"), + ("^0.1.2", ">=0.1.2,<0.2.0"), + ("^0-alpha.1", ">=0-alpha.1,<1"), + ("^0.1-alpha.1", ">=0.1-alpha.1,<0.2"), + ("^0.0.2-alpha.1", ">=0.0.2-alpha.1,<0.0.3"), + ("^0.1.2-alpha.1", ">=0.1.2-alpha.1,<0.2.0"), + ("~1", ">=1,<2"), + ("~1.0", ">=1.0,<1.1"), + ("~1.0.0", ">=1.0.0,<1.1.0"), + ], +) +def test_constraints_keep_version_precision(input: str, expected: str) -> None: + assert str(parse_constraint(input)) == expected + + +@pytest.mark.parametrize( + "unsorted, sorted_", + [ + (["1.0.3", "1.0.2", "1.0.1"], ["1.0.1", "1.0.2", "1.0.3"]), + (["1.0.0.2", "1.0.0.0rc2"], ["1.0.0.0rc2", "1.0.0.2"]), + (["1.0.0.0", "1.0.0.0rc2"], ["1.0.0.0rc2", "1.0.0.0"]), + (["1.0.0.0.0", "1.0.0.0rc2"], ["1.0.0.0rc2", "1.0.0.0.0"]), + (["1.0.0rc2", "1.0.0rc1"], ["1.0.0rc1", "1.0.0rc2"]), + (["1.0.0rc2", "1.0.0b1"], ["1.0.0b1", "1.0.0rc2"]), + ], +) +def test_versions_are_sortable(unsorted: list[str], sorted_: list[str]) -> None: + unsorted_parsed = [Version.parse(u) for u in unsorted] + sorted_parsed = [Version.parse(s) for s in sorted_] + + assert sorted(unsorted_parsed) == sorted_parsed diff --git a/tests/constraints/version/test_parse_constraint.py b/tests/constraints/version/test_parse_constraint.py new file mode 100644 index 0000000..c27a7b2 --- /dev/null +++ b/tests/constraints/version/test_parse_constraint.py @@ -0,0 +1,256 @@ +from __future__ import annotations + +import pytest + +from poetry.core.constraints.version import Version +from poetry.core.constraints.version import VersionRange +from poetry.core.constraints.version import VersionUnion +from poetry.core.constraints.version import parse_constraint +from poetry.core.version.pep440 import ReleaseTag + + +@pytest.mark.parametrize( + "constraint,version", + [ + ( + "~=3.8", + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(4, 0), + include_min=True, + ), + ), + ( + "== 3.8.*", + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(3, 9, 0), + include_min=True, + ), + ), + ( + "== 3.8.x", + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(3, 9, 0), + include_min=True, + ), + ), + ( + "~= 3.8", + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(4, 0), + include_min=True, + ), + ), + ( + "~3.8", + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(3, 9), + include_min=True, + ), + ), + ( + "~ 3.8", + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(3, 9), + include_min=True, + ), + ), + (">3.8", VersionRange(min=Version.from_parts(3, 8))), + (">=3.8", VersionRange(min=Version.from_parts(3, 8), include_min=True)), + (">= 3.8", VersionRange(min=Version.from_parts(3, 8), include_min=True)), + ( + ">3.8,<=6.5", + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(6, 5), + include_max=True, + ), + ), + ( + ">3.8,<= 6.5", + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(6, 5), + include_max=True, + ), + ), + ( + "> 3.8,<= 6.5", + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(6, 5), + include_max=True, + ), + ), + ( + "> 3.8,<=6.5", + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(6, 5), + include_max=True, + ), + ), + ( + ">3.8 ,<=6.5", + 
VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(6, 5), + include_max=True, + ), + ), + ( + ">3.8, <=6.5", + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(6, 5), + include_max=True, + ), + ), + ( + ">3.8 , <=6.5", + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(6, 5), + include_max=True, + ), + ), + ( + "==3.8", + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(3, 8), + include_min=True, + include_max=True, + ), + ), + ( + "== 3.8", + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(3, 8), + include_min=True, + include_max=True, + ), + ), + ( + "~2.7 || ~3.8", + VersionUnion( + VersionRange( + min=Version.from_parts(2, 7), + max=Version.from_parts(2, 8), + include_min=True, + ), + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(3, 9), + include_min=True, + ), + ), + ), + ( + "~2.7||~3.8", + VersionUnion( + VersionRange( + min=Version.from_parts(2, 7), + max=Version.from_parts(2, 8), + include_min=True, + ), + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(3, 9), + include_min=True, + ), + ), + ), + ( + "~ 2.7||~ 3.8", + VersionUnion( + VersionRange( + min=Version.from_parts(2, 7), + max=Version.from_parts(2, 8), + include_min=True, + ), + VersionRange( + min=Version.from_parts(3, 8), + max=Version.from_parts(3, 9), + include_min=True, + ), + ), + ), + ( + "^1.0.0a1", + VersionRange( + min=Version.from_parts(1, 0, 0, pre=ReleaseTag("a", 1)), + max=Version.from_parts(2, 0, 0), + include_min=True, + ), + ), + ( + "^1.0.0a1.dev0", + VersionRange( + min=Version.from_parts( + 1, 0, 0, pre=ReleaseTag("a", 1), dev=ReleaseTag("dev", 0) + ), + max=Version.from_parts(2, 0, 0), + include_min=True, + ), + ), + ( + "1.0.0a1.dev0", + VersionRange( + min=Version.from_parts( + 1, 0, 0, pre=ReleaseTag("a", 1), dev=ReleaseTag("dev", 0) + ), + max=Version.from_parts( + 1, 0, 0, pre=ReleaseTag("a", 1), dev=ReleaseTag("dev", 0) + ), + include_min=True, + ), + ), + ( + "~1.0.0a1", + VersionRange( + min=Version.from_parts(1, 0, 0, pre=ReleaseTag("a", 1)), + max=Version.from_parts(1, 1, 0), + include_min=True, + ), + ), + ( + "~1.0.0a1.dev0", + VersionRange( + min=Version.from_parts( + 1, 0, 0, pre=ReleaseTag("a", 1), dev=ReleaseTag("dev", 0) + ), + max=Version.from_parts(1, 1, 0), + include_min=True, + ), + ), + ( + "^0", + VersionRange( + min=Version.from_parts(0), + max=Version.from_parts(1), + include_min=True, + ), + ), + ( + "^0.0", + VersionRange( + min=Version.from_parts(0, 0), + max=Version.from_parts(0, 1), + include_min=True, + ), + ), + ], +) +@pytest.mark.parametrize(("with_whitespace_padding",), [(True,), (False,)]) +def test_parse_constraint( + constraint: str, version: VersionRange | VersionUnion, with_whitespace_padding: bool +) -> None: + padding = " " * (4 if with_whitespace_padding else 0) + assert parse_constraint(f"{padding}{constraint}{padding}") == version diff --git a/tests/constraints/version/test_utils.py b/tests/constraints/version/test_utils.py new file mode 100644 index 0000000..f562c87 --- /dev/null +++ b/tests/constraints/version/test_utils.py @@ -0,0 +1,83 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest + +from poetry.core.constraints.version import EmptyConstraint +from poetry.core.constraints.version import Version +from poetry.core.constraints.version import VersionRange +from poetry.core.constraints.version import constraint_regions + + +if TYPE_CHECKING: + from 
poetry.core.constraints.version import VersionConstraint + + +PY27 = Version.parse("2.7") +PY30 = Version.parse("3") +PY36 = Version.parse("3.6.0") +PY37 = Version.parse("3.7") +PY38 = Version.parse("3.8.0") +PY40 = Version.parse("4.0.0") + + +@pytest.mark.parametrize( + "versions, expected", + [ + ([VersionRange(None, None)], [VersionRange(None, None)]), + ([EmptyConstraint()], [VersionRange(None, None)]), + ( + [VersionRange(PY27, None, include_min=True)], + [ + VersionRange(None, PY27, include_max=False), + VersionRange(PY27, None, include_min=True), + ], + ), + ( + [VersionRange(None, PY40, include_max=False)], + [ + VersionRange(None, PY40, include_max=False), + VersionRange(PY40, None, include_min=True), + ], + ), + ( + [VersionRange(PY27, PY27, include_min=True, include_max=True)], + [ + VersionRange(None, PY27, include_max=False), + VersionRange(PY27, PY27, include_min=True, include_max=True), + VersionRange(PY27, None, include_min=False), + ], + ), + ( + [VersionRange(PY27, PY30, include_min=True, include_max=False)], + [ + VersionRange(None, PY27, include_max=False), + VersionRange(PY27, PY30, include_min=True, include_max=False), + VersionRange(PY30, None, include_min=True), + ], + ), + ( + [ + VersionRange(PY27, PY30, include_min=True, include_max=False).union( + VersionRange(PY37, PY40, include_min=False, include_max=True) + ), + VersionRange(PY36, PY38, include_min=True, include_max=False), + ], + [ + VersionRange(None, PY27, include_max=False), + VersionRange(PY27, PY30, include_min=True, include_max=False), + VersionRange(PY30, PY36, include_min=True, include_max=False), + VersionRange(PY36, PY37, include_min=True, include_max=True), + VersionRange(PY37, PY38, include_min=False, include_max=False), + VersionRange(PY38, PY40, include_min=True, include_max=True), + VersionRange(PY40, None, include_min=False), + ], + ), + ], +) +def test_constraint_regions( + versions: list[VersionConstraint], expected: list[VersionRange] +) -> None: + regions = constraint_regions(versions) + assert regions == expected diff --git a/tests/constraints/version/test_version.py b/tests/constraints/version/test_version.py new file mode 100644 index 0000000..c53513a --- /dev/null +++ b/tests/constraints/version/test_version.py @@ -0,0 +1,529 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest + +from poetry.core.constraints.version import EmptyConstraint +from poetry.core.constraints.version import Version +from poetry.core.constraints.version import VersionRange +from poetry.core.version.exceptions import InvalidVersion +from poetry.core.version.pep440 import ReleaseTag + + +if TYPE_CHECKING: + from poetry.core.constraints.version import VersionConstraint + + +@pytest.mark.parametrize( + "text,version", + [ + ("1.0.0", Version.from_parts(1, 0, 0)), + ("1", Version.from_parts(1, 0, 0)), + ("1.0", Version.from_parts(1, 0, 0)), + ("1b1", Version.from_parts(1, 0, 0, pre=ReleaseTag("beta", 1))), + ("1.0b1", Version.from_parts(1, 0, 0, pre=ReleaseTag("beta", 1))), + ("1.0.0b1", Version.from_parts(1, 0, 0, pre=ReleaseTag("beta", 1))), + ("1.0.0-b1", Version.from_parts(1, 0, 0, pre=ReleaseTag("beta", 1))), + ("1.0.0-beta.1", Version.from_parts(1, 0, 0, pre=ReleaseTag("beta", 1))), + ("1.0.0+1", Version.from_parts(1, 0, 0, local=1)), + ("1.0.0-1", Version.from_parts(1, 0, 0, post=ReleaseTag("post", 1))), + ("1.0.0.0", Version.from_parts(1, 0, 0, extra=0)), + ("1.0.0-post", Version.from_parts(1, 0, 0, post=ReleaseTag("post"))), + ("1.0.0-post1", Version.from_parts(1, 0, 
0, post=ReleaseTag("post", 1))), + ("0.6c", Version.from_parts(0, 6, 0, pre=ReleaseTag("rc", 0))), + ("0.6pre", Version.from_parts(0, 6, 0, pre=ReleaseTag("preview", 0))), + ("1!2.3.4", Version.from_parts(2, 3, 4, epoch=1)), + ], +) +def test_parse_valid(text: str, version: Version) -> None: + parsed = Version.parse(text) + + assert parsed == version + assert parsed.text == text + + +@pytest.mark.parametrize("value", [None, "example"]) +def test_parse_invalid(value: str | None) -> None: + with pytest.raises(InvalidVersion): + Version.parse(value) # type: ignore[arg-type] + + +@pytest.mark.parametrize( + "version, expected", + [ + ("1", "1"), + ("1.2", "1.2"), + ("1.2.3", "1.2.3"), + ("2!1.2.3", "2!1.2.3"), + ("1.2.3+local", "1.2.3+local"), + ("1.2.3.4", "1.2.3.4"), + ("1.dev0", "1"), + ("1.2dev0", "1.2"), + ("1.2.3dev0", "1.2.3"), + ("1.2.3.4dev0", "1.2.3.4"), + ("1.post1", "1.post1"), + ("1.2.post1", "1.2.post1"), + ("1.2.3.post1", "1.2.3.post1"), + ("1.post1.dev0", "1.post1"), + ("1.2.post1.dev0", "1.2.post1"), + ("1.2.3.post1.dev0", "1.2.3.post1"), + ("1.a1", "1"), + ("1.2a1", "1.2"), + ("1.2.3a1", "1.2.3"), + ("1.2.3.4a1", "1.2.3.4"), + ("1.a1.post2", "1"), + ("1.2a1.post2", "1.2"), + ("1.2.3a1.post2", "1.2.3"), + ("1.2.3.4a1.post2", "1.2.3.4"), + ("1.a1.post2.dev0", "1"), + ("1.2a1.post2.dev0", "1.2"), + ("1.2.3a1.post2.dev0", "1.2.3"), + ("1.2.3.4a1.post2.dev0", "1.2.3.4"), + ], +) +def test_stable(version: str, expected: str) -> None: + subject = Version.parse(version) + + assert subject.stable.text == expected + + +@pytest.mark.parametrize( + "version, expected", + [ + ("1", "2"), + ("1.2", "2.0"), + ("1.2.3", "2.0.0"), + ("2!1.2.3", "2!2.0.0"), + ("1.2.3+local", "2.0.0"), + ("1.2.3.4", "2.0.0.0"), + ("1.dev0", "2"), + ("1.2dev0", "2.0"), + ("1.2.3dev0", "2.0.0"), + ("1.2.3.4dev0", "2.0.0.0"), + ("1.post1", "2"), + ("1.2.post1", "2.0"), + ("1.2.3.post1", "2.0.0"), + ("1.post1.dev0", "2"), + ("1.2.post1.dev0", "2.0"), + ("1.2.3.post1.dev0", "2.0.0"), + ("2.a1", "3"), + ("2.2a1", "3.0"), + ("2.2.3a1", "3.0.0"), + ("2.2.3.4a1", "3.0.0.0"), + ("2.a1.post2", "3"), + ("2.2a1.post2", "3.0"), + ("2.2.3a1.post2", "3.0.0"), + ("2.2.3.4a1.post2", "3.0.0.0"), + ("2.a1.post2.dev0", "3"), + ("2.2a1.post2.dev0", "3.0"), + ("2.2.3a1.post2.dev0", "3.0.0"), + ("2.2.3.4a1.post2.dev0", "3.0.0.0"), + ], +) +def test_next_breaking_for_major_over_0_results_into_next_major_and_preserves_precision( + version: str, expected: str +) -> None: + subject = Version.parse(version) + + assert subject.next_breaking().text == expected + + +@pytest.mark.parametrize( + "version, expected", + [ + ("0", "1"), + ("0.0", "0.1"), + ("0.2", "0.3"), + ("0.2.3", "0.3.0"), + ("2!0.2.3", "2!0.3.0"), + ("0.2.3+local", "0.3.0"), + ("0.2.3.4", "0.3.0.0"), + ("0.0.3.4", "0.0.4.0"), + ("0.dev0", "1"), + ("0.0dev0", "0.1"), + ("0.2dev0", "0.3"), + ("0.2.3dev0", "0.3.0"), + ("0.0.3dev0", "0.0.4"), + ("0.post1", "1"), + ("0.0.post1", "0.1"), + ("0.2.post1", "0.3"), + ("0.2.3.post1", "0.3.0"), + ("0.0.3.post1", "0.0.4"), + ("0.post1.dev0", "1"), + ("0.0.post1.dev0", "0.1"), + ("0.2.post1.dev0", "0.3"), + ("0.2.3.post1.dev0", "0.3.0"), + ("0.0.3.post1.dev0", "0.0.4"), + ("0.a1", "1"), + ("0.0a1", "0.1"), + ("0.2a1", "0.3"), + ("0.2.3a1", "0.3.0"), + ("0.2.3.4a1", "0.3.0.0"), + ("0.0.3.4a1", "0.0.4.0"), + ("0.a1.post2", "1"), + ("0.0a1.post2", "0.1"), + ("0.2a1.post2", "0.3"), + ("0.2.3a1.post2", "0.3.0"), + ("0.2.3.4a1.post2", "0.3.0.0"), + ("0.0.3.4a1.post2", "0.0.4.0"), + ("0.a1.post2.dev0", "1"), + ("0.0a1.post2.dev0", "0.1"), + 
("0.2a1.post2.dev0", "0.3"), + ("0.2.3a1.post2.dev0", "0.3.0"), + ("0.2.3.4a1.post2.dev0", "0.3.0.0"), + ("0.0.3.4a1.post2.dev0", "0.0.4.0"), + ("0-alpha.1", "1"), + ("0.0-alpha.1", "0.1"), + ("0.2-alpha.1", "0.3"), + ("0.0.1-alpha.2", "0.0.2"), + ("0.1.2-alpha.1", "0.2.0"), + ], +) +def test_next_breaking_for_major_0_is_treated_with_more_care_and_preserves_precision( + version: str, expected: str +) -> None: + subject = Version.parse(version) + + assert subject.next_breaking().text == expected + + +@pytest.mark.parametrize( + "versions", + [ + [ + "1.0.0-alpha", + "1.0.0-alpha.1", + "1.0.0-beta.2", + "1.0.0-beta.11", + "1.0.0-rc.1", + "1.0.0-rc.1+build.1", + "1.0.0", + "1.0.0+0.3.7", + "1.3.7+build", + "1.3.7+build.2.b8f12d7", + "1.3.7+build.11.e0f985a", + "2.0.0", + "2.1.0", + "2.2.0", + "2.11.0", + "2.11.1", + ], + # PEP 440 example comparisons + [ + "1.0.dev456", + "1.0.dev456+local", + "1.0.dev457", + "1.0a1", + "1.0a1+local", + "1.0a2.dev456", + "1.0a2.dev456+local", + "1.0a2.dev457", + "1.0a2", + "1.0a12.dev455", + "1.0a12", + "1.0b1.dev456", + "1.0b2", + "1.0b2.post345.dev456", + "1.0b2.post345", + "1.0rc1.dev456", + "1.0rc1", + "1.0", + "1.0+local", + "1.0.post456.dev34", + "1.0.post456.dev34+local", + "1.0.post456.dev35", + "1.0.post456", + "1.0.post456+local", + "1.0.post457", + "1.1.dev1", + ], + # PEP 440 local versions + [ + "1.0", + # Comparison and ordering of local versions considers each segment + # of the local version (divided by a .) separately. + "1.0+abc.2", + # If a segment consists entirely of ASCII digits then + # that section should be considered an integer for comparison purposes + "1.0+abc.10", + # and if a segment contains any ASCII letters then + # that segment is compared lexicographically with case insensitivity. + "1.0+ABD.1", + # When comparing a numeric and lexicographic segment, the numeric section + # always compares as greater than the lexicographic segment. + "1.0+5", + # Additionally, a local version with a great number of segments will always + # compare as greater than a local version with fewer segments, + # as long as the shorter local version's segments match the beginning of + # the longer local version's segments exactly. 
+ "1.0+5.0", + "1.1", + ], + ], +) +def test_comparison(versions: list[str]) -> None: + for i in range(len(versions)): + for j in range(len(versions)): + a = Version.parse(versions[i]) + b = Version.parse(versions[j]) + + assert (a < b) == (i < j) + assert (a > b) == (i > j) + assert (a <= b) == (i <= j) + assert (a >= b) == (i >= j) + assert (a == b) == (i == j) + assert (a != b) == (i != j) + + +def test_equality() -> None: + assert Version.parse("1.2.3") == Version.parse("01.2.3") + assert Version.parse("1.2.3") == Version.parse("1.02.3") + assert Version.parse("1.2.3") == Version.parse("1.2.03") + assert Version.parse("1.2.3-1") == Version.parse("1.2.3-01") + assert Version.parse("1.2.3+1") == Version.parse("1.2.3+01") + + +def test_allows() -> None: + v = Version.parse("1.2.3") + assert v.allows(v) + assert not v.allows(Version.parse("2.2.3")) + assert not v.allows(Version.parse("1.3.3")) + assert not v.allows(Version.parse("1.2.4")) + assert not v.allows(Version.parse("1.2.3-dev")) + assert not v.allows(Version.parse("1.2.3-1")) + assert not v.allows(Version.parse("1.2.3-1+build")) + assert v.allows(Version.parse("1.2.3+build")) + + +def test_allows_with_local() -> None: + v = Version.parse("1.2.3+build.1") + assert v.allows(v) + assert not v.allows(Version.parse("1.2.3")) + assert not v.allows(Version.parse("1.3.3")) + assert not v.allows(Version.parse("1.2.3-dev")) + assert not v.allows(Version.parse("1.2.3+build.2")) + # local version with a great number of segments will always compare as + # greater than a local version with fewer segments + assert not v.allows(Version.parse("1.2.3+build.1.0")) + assert not v.allows(Version.parse("1.2.3-1")) + assert not v.allows(Version.parse("1.2.3-1+build.1")) + + +def test_allows_with_post() -> None: + v = Version.parse("1.2.3-1") + assert v.allows(v) + assert not v.allows(Version.parse("1.2.3")) + assert not v.allows(Version.parse("1.2.3-2")) + assert not v.allows(Version.parse("2.2.3")) + assert not v.allows(Version.parse("1.2.3-dev")) + assert not v.allows(Version.parse("1.2.3+build.2")) + assert v.allows(Version.parse("1.2.3-1+build.1")) + + +def test_allows_all() -> None: + v = Version.parse("1.2.3") + + assert v.allows_all(v) + assert not v.allows_all(Version.parse("0.0.3")) + assert not v.allows_all( + VersionRange(Version.parse("1.1.4"), Version.parse("1.2.4")) + ) + assert not v.allows_all(VersionRange()) + assert v.allows_all(EmptyConstraint()) + + +@pytest.mark.parametrize( + ("version1", "version2", "expected"), + [ + ( + Version.parse("1.2.3"), + Version.parse("1.2.3"), + True, + ), + ( + Version.parse("1.2.3"), + Version.parse("1.2.3+cpu"), + True, + ), + ( + Version.parse("1.2.3+cpu"), + Version.parse("1.2.3"), + False, + ), + ( + Version.parse("1.2.3"), + Version.parse("0.0.3"), + False, + ), + ( + Version.parse("1.2.3"), + VersionRange(Version.parse("1.1.4"), Version.parse("1.2.4")), + True, + ), + ( + Version.parse("1.2.3"), + VersionRange(), + True, + ), + ( + Version.parse("1.2.3"), + EmptyConstraint(), + False, + ), + ], +) +def test_allows_any( + version1: VersionConstraint, + version2: VersionConstraint, + expected: bool, +) -> None: + actual = version1.allows_any(version2) + assert actual == expected + + +@pytest.mark.parametrize( + ("version1", "version2", "expected"), + [ + ( + Version.parse("1.2.3"), + Version.parse("1.1.4"), + EmptyConstraint(), + ), + ( + Version.parse("1.2.3"), + VersionRange(Version.parse("1.1.4"), Version.parse("1.2.4")), + Version.parse("1.2.3"), + ), + ( + Version.parse("1.1.4"), + 
VersionRange(Version.parse("1.2.3"), Version.parse("1.2.4")), + EmptyConstraint(), + ), + ( + Version.parse("1.2.3"), + Version.parse("1.2.3.post0"), + EmptyConstraint(), + ), + ( + Version.parse("1.2.3"), + Version.parse("1.2.3+local"), + Version.parse("1.2.3+local"), + ), + ], +) +def test_intersect( + version1: VersionConstraint, + version2: VersionConstraint, + expected: VersionConstraint, +) -> None: + assert version1.intersect(version2) == expected + assert version2.intersect(version1) == expected + + +def test_union() -> None: + v = Version.parse("1.2.3") + + assert v.union(v) == v + + result = v.union(Version.parse("0.8.0")) + assert result.allows(v) + assert result.allows(Version.parse("0.8.0")) + assert not result.allows(Version.parse("1.1.4")) + + range = VersionRange(Version.parse("1.1.4"), Version.parse("1.2.4")) + assert v.union(range) == range + + union = Version.parse("1.1.4").union( + VersionRange(Version.parse("1.1.4"), Version.parse("1.2.4")) + ) + assert union == VersionRange( + Version.parse("1.1.4"), Version.parse("1.2.4"), include_min=True + ) + + result = v.union(VersionRange(Version.parse("0.0.3"), Version.parse("1.1.4"))) + assert result.allows(v) + assert result.allows(Version.parse("0.1.0")) + + +def test_difference() -> None: + v = Version.parse("1.2.3") + + assert v.difference(v).is_empty() + assert v.difference(Version.parse("0.8.0")) == v + assert v.difference( + VersionRange(Version.parse("1.1.4"), Version.parse("1.2.4")) + ).is_empty() + assert ( + v.difference(VersionRange(Version.parse("1.4.0"), Version.parse("3.0.0"))) == v + ) + + +@pytest.mark.parametrize( + "version,normalized_version", + [ + ( # already normalized version + "1!2.3.4.5.6a7.post8.dev9+local1.123.abc", + "1!2.3.4.5.6a7.post8.dev9+local1.123.abc", + ), + # PEP 440 Normalization + # Case sensitivity + ("1.1RC1", "1.1rc1"), + # Integer Normalization + ("00", "0"), + ("09000", "9000"), + ("1.0+foo0100", "1.0+foo0100"), + # Pre-release separators + ("1.1.a1", "1.1a1"), + ("1.1-a1", "1.1a1"), + ("1.1_a1", "1.1a1"), + ("1.1a.1", "1.1a1"), + ("1.1a-1", "1.1a1"), + ("1.1a_1", "1.1a1"), + # Pre-release spelling + ("1.1alpha1", "1.1a1"), + ("1.1beta2", "1.1b2"), + ("1.1c3", "1.1rc3"), + ("1.1pre4", "1.1rc4"), + ("1.1preview5", "1.1rc5"), + # Implicit pre-release number + ("1.2a", "1.2a0"), + # Post release separators + ("1.2.post2", "1.2.post2"), + ("1.2-post2", "1.2.post2"), + ("1.2_post2", "1.2.post2"), + ("1.2post.2", "1.2.post2"), + ("1.2post-2", "1.2.post2"), + ("1.2post_2", "1.2.post2"), + # Post release spelling + ("1.0-r4", "1.0.post4"), + ("1.0-rev4", "1.0.post4"), + # Implicit post release number + ("1.2.post", "1.2.post0"), + # Implicit post releases + ("1.0-1", "1.0.post1"), + # Development release separators + ("1.2.dev2", "1.2.dev2"), + ("1.2-dev2", "1.2.dev2"), + ("1.2_dev2", "1.2.dev2"), + ("1.2dev.2", "1.2.dev2"), + ("1.2dev-2", "1.2.dev2"), + ("1.2dev_2", "1.2.dev2"), + # Implicit development release number + ("1.2.dev", "1.2.dev0"), + # Local version segments + ("1.0+ubuntu-1", "1.0+ubuntu.1"), + ("1.0+ubuntu_1", "1.0+ubuntu.1"), + # Preceding v character + ("v1.0", "1.0"), + # Leading and Trailing Whitespace + (" 1.0 ", "1.0"), + ("\t1.0\t", "1.0"), + ("\n1.0\n", "1.0"), + ("\r\n1.0\r\n", "1.0"), + ("\f1.0\f", "1.0"), + ("\v1.0\v", "1.0"), + ], +) +def test_to_string_normalizes(version: str, normalized_version: str) -> None: + assert Version.parse(version).to_string() == normalized_version diff --git a/tests/constraints/version/test_version_constraint.py 
b/tests/constraints/version/test_version_constraint.py new file mode 100644 index 0000000..49834de --- /dev/null +++ b/tests/constraints/version/test_version_constraint.py @@ -0,0 +1,32 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest + +from poetry.core.constraints.version import EmptyConstraint +from poetry.core.constraints.version import Version +from poetry.core.constraints.version import VersionRange +from poetry.core.constraints.version import VersionUnion + + +if TYPE_CHECKING: + from poetry.core.constraints.version import VersionConstraint + + +@pytest.mark.parametrize( + "constraint", + [ + EmptyConstraint(), + Version.parse("1"), + VersionRange(Version.parse("1"), Version.parse("2")), + VersionUnion( + VersionRange(Version.parse("1"), Version.parse("2")), + VersionRange(Version.parse("3"), Version.parse("4")), + ), + ], +) +def test_constraints_are_hashable(constraint: VersionConstraint) -> None: + # We're just testing that constraints are hashable, there's nothing much to say + # about the result. + hash(constraint) diff --git a/tests/constraints/version/test_version_range.py b/tests/constraints/version/test_version_range.py new file mode 100644 index 0000000..fcac247 --- /dev/null +++ b/tests/constraints/version/test_version_range.py @@ -0,0 +1,460 @@ +from __future__ import annotations + +import pytest + +from poetry.core.constraints.version import EmptyConstraint +from poetry.core.constraints.version import Version +from poetry.core.constraints.version import VersionRange + + +@pytest.fixture() +def v003() -> Version: + return Version.parse("0.0.3") + + +@pytest.fixture() +def v010() -> Version: + return Version.parse("0.1.0") + + +@pytest.fixture() +def v080() -> Version: + return Version.parse("0.8.0") + + +@pytest.fixture() +def v072() -> Version: + return Version.parse("0.7.2") + + +@pytest.fixture() +def v114() -> Version: + return Version.parse("1.1.4") + + +@pytest.fixture() +def v123() -> Version: + return Version.parse("1.2.3") + + +@pytest.fixture() +def v124() -> Version: + return Version.parse("1.2.4") + + +@pytest.fixture() +def v130() -> Version: + return Version.parse("1.3.0") + + +@pytest.fixture() +def v140() -> Version: + return Version.parse("1.4.0") + + +@pytest.fixture() +def v200() -> Version: + return Version.parse("2.0.0") + + +@pytest.fixture() +def v234() -> Version: + return Version.parse("2.3.4") + + +@pytest.fixture() +def v250() -> Version: + return Version.parse("2.5.0") + + +@pytest.fixture() +def v300() -> Version: + return Version.parse("3.0.0") + + +@pytest.fixture() +def v300b1() -> Version: + return Version.parse("3.0.0b1") + + +@pytest.mark.parametrize( + "base,other", + [ + pytest.param(Version.parse("3.0.0"), Version.parse("3.0.0-1"), id="post"), + pytest.param( + Version.parse("3.0.0"), Version.parse("3.0.0+local.1"), id="local" + ), + ], +) +def test_allows_post_releases_with_max(base: Version, other: Version) -> None: + range = VersionRange(max=base, include_max=True) + assert range.allows(other) + + +@pytest.mark.parametrize( + "base,other", + [ + pytest.param(Version.parse("3.0.0"), Version.parse("3.0.0-1"), id="post"), + pytest.param( + Version.parse("3.0.0"), Version.parse("3.0.0+local.1"), id="local" + ), + ], +) +def test_allows_post_releases_with_min(base: Version, other: Version) -> None: + range = VersionRange(min=base, include_min=True) + assert range.allows(other) + + +def test_allows_post_releases_with_post_and_local_min() -> None: + one = Version.parse("3.0.0+local.1") + two = 
Version.parse("3.0.0-1") + three = Version.parse("3.0.0-1+local.1") + four = Version.parse("3.0.0+local.2") + + assert VersionRange(min=one, include_min=True).allows(two) + assert VersionRange(min=one, include_min=True).allows(three) + assert VersionRange(min=one, include_min=True).allows(four) + + assert not VersionRange(min=two, include_min=True).allows(one) + assert VersionRange(min=two, include_min=True).allows(three) + assert not VersionRange(min=two, include_min=True).allows(four) + + assert not VersionRange(min=three, include_min=True).allows(one) + assert not VersionRange(min=three, include_min=True).allows(two) + assert not VersionRange(min=three, include_min=True).allows(four) + + assert not VersionRange(min=four, include_min=True).allows(one) + assert VersionRange(min=four, include_min=True).allows(two) + assert VersionRange(min=four, include_min=True).allows(three) + + +def test_allows_post_releases_with_post_and_local_max() -> None: + one = Version.parse("3.0.0+local.1") + two = Version.parse("3.0.0-1") + three = Version.parse("3.0.0-1+local.1") + four = Version.parse("3.0.0+local.2") + + assert VersionRange(max=one, include_max=True).allows(two) + assert VersionRange(max=one, include_max=True).allows(three) + assert not VersionRange(max=one, include_max=True).allows(four) + + assert VersionRange(max=two, include_max=True).allows(one) + assert VersionRange(max=two, include_max=True).allows(three) + assert VersionRange(max=two, include_max=True).allows(four) + + assert VersionRange(max=three, include_max=True).allows(one) + assert VersionRange(max=three, include_max=True).allows(two) + assert VersionRange(max=three, include_max=True).allows(four) + + assert VersionRange(max=four, include_max=True).allows(one) + assert VersionRange(max=four, include_max=True).allows(two) + assert VersionRange(max=four, include_max=True).allows(three) + + +@pytest.mark.parametrize( + "base,one,two", + [ + pytest.param( + Version.parse("3.0.0"), + Version.parse("3.0.0-1"), + Version.parse("3.0.0-2"), + id="post", + ), + pytest.param( + Version.parse("3.0.0"), + Version.parse("3.0.0+local.1"), + Version.parse("3.0.0+local.2"), + id="local", + ), + ], +) +def test_allows_post_releases_explicit_with_max( + base: Version, one: Version, two: Version +) -> None: + range = VersionRange(max=one, include_max=True) + assert range.allows(base) + assert not range.allows(two) + + range = VersionRange(max=two, include_max=True) + assert range.allows(base) + assert range.allows(one) + + +@pytest.mark.parametrize( + "base,one,two", + [ + pytest.param( + Version.parse("3.0.0"), + Version.parse("3.0.0-1"), + Version.parse("3.0.0-2"), + id="post", + ), + pytest.param( + Version.parse("3.0.0"), + Version.parse("3.0.0+local.1"), + Version.parse("3.0.0+local.2"), + id="local", + ), + ], +) +def test_allows_post_releases_explicit_with_min( + base: Version, one: Version, two: Version +) -> None: + range = VersionRange(min=one, include_min=True) + assert not range.allows(base) + assert range.allows(two) + + range = VersionRange(min=two, include_min=True) + assert not range.allows(base) + assert not range.allows(one) + + +def test_allows_all( + v123: Version, v124: Version, v140: Version, v250: Version, v300: Version +) -> None: + assert VersionRange(v123, v250).allows_all(EmptyConstraint()) + + range = VersionRange(v123, v250, include_max=True) + assert not range.allows_all(v123) + assert range.allows_all(v124) + assert range.allows_all(v250) + assert not range.allows_all(v300) + + +def test_allows_all_with_no_min( + 
v080: Version, v140: Version, v250: Version, v300: Version +) -> None: + range = VersionRange(max=v250) + assert range.allows_all(VersionRange(v080, v140)) + assert not range.allows_all(VersionRange(v080, v300)) + assert range.allows_all(VersionRange(max=v140)) + assert not range.allows_all(VersionRange(max=v300)) + assert range.allows_all(range) + assert not range.allows_all(VersionRange()) + + +def test_allows_all_with_no_max( + v003: Version, v010: Version, v080: Version, v140: Version +) -> None: + range = VersionRange(min=v010) + assert range.allows_all(VersionRange(v080, v140)) + assert not range.allows_all(VersionRange(v003, v140)) + assert range.allows_all(VersionRange(v080)) + assert not range.allows_all(VersionRange(v003)) + assert range.allows_all(range) + assert not range.allows_all(VersionRange()) + + +def test_allows_all_bordering_range_not_more_inclusive( + v010: Version, v250: Version +) -> None: + # Allows bordering range that is not more inclusive + exclusive = VersionRange(v010, v250) + inclusive = VersionRange(v010, v250, True, True) + assert inclusive.allows_all(exclusive) + assert inclusive.allows_all(inclusive) + assert not exclusive.allows_all(inclusive) + assert exclusive.allows_all(exclusive) + + +def test_allows_all_contained_unions( + v010: Version, + v114: Version, + v123: Version, + v124: Version, + v140: Version, + v200: Version, + v234: Version, +) -> None: + # Allows unions that are completely contained + range = VersionRange(v114, v200) + assert range.allows_all(VersionRange(v123, v124).union(v140)) + assert not range.allows_all(VersionRange(v010, v124).union(v140)) + assert not range.allows_all(VersionRange(v123, v234).union(v140)) + + +def test_allows_any( + v003: Version, + v010: Version, + v072: Version, + v080: Version, + v114: Version, + v123: Version, + v124: Version, + v140: Version, + v200: Version, + v234: Version, + v250: Version, + v300: Version, +) -> None: + # disallows an empty constraint + assert not VersionRange(v123, v250).allows_any(EmptyConstraint()) + + # allows allowed versions + range = VersionRange(v123, v250, include_max=True) + assert not range.allows_any(v123) + assert range.allows_any(v124) + assert range.allows_any(v250) + assert not range.allows_any(v300) + + # with no min + range = VersionRange(max=v200) + assert range.allows_any(VersionRange(v140, v300)) + assert not range.allows_any(VersionRange(v234, v300)) + assert range.allows_any(VersionRange(v140)) + assert not range.allows_any(VersionRange(v234)) + assert range.allows_any(range) + + # with no max + range = VersionRange(min=v072) + assert range.allows_any(VersionRange(v003, v140)) + assert not range.allows_any(VersionRange(v003, v010)) + assert range.allows_any(VersionRange(max=v080)) + assert not range.allows_any(VersionRange(max=v003)) + assert range.allows_any(range) + + # with min and max + range = VersionRange(v072, v200) + assert range.allows_any(VersionRange(v003, v140)) + assert range.allows_any(VersionRange(v140, v300)) + assert not range.allows_any(VersionRange(v003, v010)) + assert not range.allows_any(VersionRange(v234, v300)) + assert not range.allows_any(VersionRange(max=v010)) + assert not range.allows_any(VersionRange(v234)) + assert range.allows_any(range) + + # allows a bordering range when both are inclusive + assert not VersionRange(max=v250).allows_any(VersionRange(min=v250)) + assert not VersionRange(max=v250, include_max=True).allows_any( + VersionRange(min=v250) + ) + assert not VersionRange(max=v250).allows_any( + VersionRange(min=v250, 
include_min=True) + ) + assert not VersionRange(min=v250).allows_any(VersionRange(max=v250)) + assert VersionRange(max=v250, include_max=True).allows_any( + VersionRange(min=v250, include_min=True) + ) + + # allows unions that are partially contained' + range = VersionRange(v114, v200) + assert range.allows_any(VersionRange(v010, v080).union(v140)) + assert range.allows_any(VersionRange(v123, v234).union(v300)) + assert not range.allows_any(VersionRange(v234, v300).union(v010)) + + # pre-release min does not allow lesser than itself + range = VersionRange(Version.parse("1.9b1"), include_min=True) + assert not range.allows_any( + VersionRange(Version.parse("1.8.0"), Version.parse("1.9.0"), include_min=True) + ) + + +def test_intersect( + v114: Version, + v123: Version, + v124: Version, + v200: Version, + v250: Version, + v300: Version, +) -> None: + # two overlapping ranges + assert VersionRange(v123, v250).intersect(VersionRange(v200, v300)) == VersionRange( + v200, v250 + ) + + # a non-overlapping range allows no versions + a = VersionRange(v114, v124) + b = VersionRange(v200, v250) + assert a.intersect(b).is_empty() + + # adjacent ranges allow no versions if exclusive + a = VersionRange(v114, v124) + b = VersionRange(v124, v200) + assert a.intersect(b).is_empty() + + # adjacent ranges allow version if inclusive + a = VersionRange(v114, v124, include_max=True) + b = VersionRange(v124, v200, include_min=True) + assert a.intersect(b) == v124 + + # with an open range + open = VersionRange() + a = VersionRange(v114, v124) + assert open.intersect(open) == open + assert open.intersect(a) == a + + # returns the version if the range allows it + assert VersionRange(v114, v124).intersect(v123) == v123 + assert VersionRange(v123, v124).intersect(v114).is_empty() + + +def test_union( + v003: Version, + v010: Version, + v072: Version, + v080: Version, + v114: Version, + v123: Version, + v124: Version, + v130: Version, + v140: Version, + v200: Version, + v234: Version, + v250: Version, + v300: Version, +) -> None: + # with a version returns the range if it contains the version + range = VersionRange(v114, v124) + assert range.union(v123) == range + + # with a version on the edge of the range, expands the range + range = VersionRange(v114, v124) + assert range.union(v124) == VersionRange(v114, v124, include_max=True) + assert range.union(v114) == VersionRange(v114, v124, include_min=True) + + # with a version allows both the range and the version if the range + # doesn't contain the version + result = VersionRange(v003, v114).union(v124) + assert result.allows(v010) + assert not result.allows(v123) + assert result.allows(v124) + + # returns a VersionUnion for a disjoint range + result = VersionRange(v003, v114).union(VersionRange(v130, v200)) + assert result.allows(v080) + assert not result.allows(v123) + assert result.allows(v140) + + # considers open ranges disjoint + result = VersionRange(v003, v114).union(VersionRange(v114, v200)) + assert result.allows(v080) + assert not result.allows(v114) + assert result.allows(v140) + result = VersionRange(v114, v200).union(VersionRange(v003, v114)) + assert result.allows(v080) + assert not result.allows(v114) + assert result.allows(v140) + + # returns a merged range for an overlapping range + result = VersionRange(v003, v114).union(VersionRange(v080, v200)) + assert result == VersionRange(v003, v200) + + # considers closed ranges overlapping + result = VersionRange(v003, v114, include_max=True).union(VersionRange(v114, v200)) + assert result == 
VersionRange(v003, v200) + result = VersionRange(v003, v114).union(VersionRange(v114, v200, include_min=True)) + assert result == VersionRange(v003, v200) + + +def test_include_max_prerelease(v200: Version, v300: Version, v300b1: Version) -> None: + result = VersionRange(v200, v300) + + assert not result.allows(v300b1) + assert not result.allows_any(VersionRange(v300b1)) + assert not result.allows_all(VersionRange(v200, v300b1)) + + result = VersionRange(v200, v300, always_include_max_prerelease=True) + + assert result.allows(v300b1) + assert result.allows_any(VersionRange(v300b1)) + assert result.allows_all(VersionRange(v200, v300b1)) diff --git a/tests/fixtures/complete.toml b/tests/fixtures/complete.toml new file mode 100644 index 0000000..8d45ac0 --- /dev/null +++ b/tests/fixtures/complete.toml @@ -0,0 +1,47 @@ +[tool.poetry] +name = "poetry" +version = "0.5.0" +description = "Python dependency management and packaging made easy." +authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" +repository = "https://github.com/python-poetry/poetry" +documentation = "https://python-poetry.org/docs" + +keywords = ["packaging", "dependency", "poetry"] + +# Requirements +[tool.poetry.dependencies] +python = "~2.7 || ^3.2" # Compatible python versions must be declared here +toml = "^0.9" +# Dependencies with extras +requests = { version = "^2.13", extras = [ "security" ] } +# Python specific dependencies with prereleases allowed +pathlib2 = { version = "^2.2", python = "~2.7", allows-prereleases = true } +# Git dependencies +cleo = { git = "https://github.com/sdispater/cleo.git", branch = "master" } + +# Optional dependencies (extras) +pendulum = { version = "^1.4", optional = true } + +[tool.poetry.extras] +time = [ "pendulum" ] + +[tool.poetry.dev-dependencies] +pytest = "^3.0" +pytest-cov = "^2.4" + +[tool.poetry.scripts] +my-script = 'my_package:main' +sample_pyscript = { reference = "script-files/sample_script.py", type= "file" } +sample_shscript = { reference = "script-files/sample_script.sh", type= "file" } + + +[[tool.poetry.source]] +name = "foo" +url = "https://bar.com" diff --git a/tests/fixtures/distributions/demo-0.1.0-py2.py3-none-any.whl b/tests/fixtures/distributions/demo-0.1.0-py2.py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..9e0805593b652818425370fb6973486b80c689ba GIT binary patch literal 1116 zcmWIWW@Zs#fB;2?Ca%JlVn7ZE3jlFSYHq%Me0*kJW=VX!UO{Did|7Hyab|vAe7u6K zg1Uj8p`L*{R{+!u4xkQ24~MBd?BPIG1Q1I>&Cms_)Jw@MF44`*OUu^}_i%Of;XUQZ z)u152dSUPK4@}Bw7Y=c|?Cw~x_2BZB8J(Wu_x9cU^;W##GF$EDf-Q4Dr?EH(3-*2! z6v@4|TW`lf9i`S~)1R%{@mVnUBG=|+pZj%pEdP4s(1IgpYfzoTp?0=Fijjfg7!w17 zEH3Bxx`sHqIEFY*op{&lu!BJB`|p~^;xY}rC%JKJYj%mTWb251t-9s&ILR#fyZamE zpECPqMBnyW5x&!U&gZ!^8+WR=t6KHUI~lQ3no-j~qOpr*$`X^`pOfc(m~nPbP{uW; z8*9Tv)^6Z>5L_5;vqvOHN&L!*qz%?eFx1YRk zJ&Vcu=J7sGYSYCM*&79iH2i;l5b68G*k4(%J!!AU_oM^A-|TYBei{@MVQ0F_XvLHq ztr-cyKMRaI9{l&ayJCy>`F2_9=5;pbt<28>k+pf7PJsL-9)HPtt7fKW8UHH1?^fOG z-Qss(kyEPp@<8Uo~0L;%gRjE z+Pb~5clO8^JbgWT9{0j?r#KS#{%^{>VZHAA4!{28CWoI@pZI_5{HiMl&+QJF|E*x- zSMjE`leWyY@Sf+#TQFzlpL~T)Ioy#!zHFg6S-39y>E~IgG0h4)%9v2b8sN>yB*Kh4w*UhI43;#4DD=F8(2YHxK=d*& zG%&hAbz{#p=;oj&aD+LRahU^4?&wCLM=Qc84Pfp8c?iQvkVp>jW@Q5@W&uJ_X47E? 
F@c^7EgWUiC literal 0 HcmV?d00001 diff --git a/tests/fixtures/distributions/demo-0.1.0.tar.gz b/tests/fixtures/distributions/demo-0.1.0.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..133b64421f86ad26448e0b0f27be33df6808c14c GIT binary patch literal 961 zcmV;y13vs8iwFpsLE~Ej|72xtZ!It`F)lDJbYXG;?N{Ay+cp^WHJ^ghF4P(kBhj*) zXmik^>(*gg8ni%fGEZP_#^WN18bxI>((Qfr9DA{Tq~yeQ(jgvF@25OBCJ#UUisyVZ z*(du?PT1vh&SqToJ_y1V@B0nIL3~p-d=$k)+QXN<9iXW-Q!xEQZ}L9^dVmWp3OLwZOCG)j_$qMpP59y4CZAS4k9_f2s>%S0)3mXT`lp!!eZ`2G5)$%g(1NtE<( z==A@=k!2TL)k2n8hR4{qH^Fzx|7bv5{&)Gm&Hr(f&@iGQIT(gA2Ca7cuhi8CiZiR=smw_Sd4e`*7 z*Ha}b-LW2Gjs8ih8Y-vWt3UsE!)h&fiJzHMopStn%{%G|Zz&?=kvBnTbzU;#6)$JJ zoC;n~P_=+D?fAb_GQw});vWYj_K=Bb@}@8?N8^aZhMeYH0&%sn*1orgrClS-p{`ZK ze?#UP$+zOEV#C;0;r@+Kqe1mM(%@+v2rZfCt-wv;5cTzvjBB z9`fCPPyI)fh8y~)bQC)M?*gCVpda9=oQZOt;zpkZ2ZljsN)~t~YTam&*JLUc$D*jD z(%9Y;geo>YXLD|}{{uSp%|-Gj*K{^xn%Sl+-%s(W-}fgVEm6X8=Xs|1FO5*wotM(? zWNpT2b+-)cMgrB@!8+CXlRfN%FfC_1m4sq+maz^XXtHb$6V{q#|{K**Hjr7|>v@FnNVLmJG0 zJw^zkE(9dJ;F-w*DTefWi-Yug^YOlf2SbWYCjP9=8+Z)wT?p|uW3C_bkGh=ycr&8Q j@v8&D{GQ7Y4h{|u4h{|u4h{|ue+%yb(`BL(04M+eDE0Bh literal 0 HcmV?d00001 diff --git a/tests/fixtures/invalid_pyproject/pyproject.toml b/tests/fixtures/invalid_pyproject/pyproject.toml new file mode 100644 index 0000000..06a8e27 --- /dev/null +++ b/tests/fixtures/invalid_pyproject/pyproject.toml @@ -0,0 +1,11 @@ +[tool.poetry] +name = "invalid" +version = "1.0.0" +authors = [ + "Foo " +] +license = "INVALID" + +[tool.poetry.dependencies] +python = "*" +pendulum = {"version" = "^2.0.5", allows-prereleases = true} diff --git a/tests/fixtures/pep_517_backend/README.md b/tests/fixtures/pep_517_backend/README.md new file mode 100644 index 0000000..8e0a375 --- /dev/null +++ b/tests/fixtures/pep_517_backend/README.md @@ -0,0 +1,2 @@ +This fixture allows testing a project that uses the repository version of `poetry-core` +as a PEP 517 backend. diff --git a/tests/fixtures/pep_517_backend/foo/__init__.py b/tests/fixtures/pep_517_backend/foo/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/fixtures/pep_517_backend/pyproject.toml b/tests/fixtures/pep_517_backend/pyproject.toml new file mode 100644 index 0000000..f4698dd --- /dev/null +++ b/tests/fixtures/pep_517_backend/pyproject.toml @@ -0,0 +1,37 @@ +[tool.poetry] +name = "foo" +version = "1.2.3" +description = "Some description." +authors = ["Foo "] +license = "MIT" +readme = "README.md" + +homepage = "https://example.com" +repository = "https://github.com/example/example" +documentation = "https://example.com" + +keywords = ["example", "packaging"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +[tool.poetry.dependencies] +python = "^3.7" +attrs = "^22.1.0" + +[tool.poetry.group.dev.dependencies] +pytest = "7.1.3" + +# Non-regression test for https://github.com/python-poetry/poetry-core/pull/492. +# The underlying issue occurred because `tomlkit` can either return a TOML table as `Table` instance or an +# `OutOfOrderProxy` one, if a table is discontinuous and multiple sections of a table are separated by a non-related +# table, but we were too strict in our type check assertions. +# So adding `tool.black` here ensure that we have discontinuous tables, so that we don't re-introduce the issue caused +# by the type check assertion that ended up being reverted. 
+[tool.black] +preview = true + +[tool.poetry.scripts] +my-script = "my_package:main" diff --git a/tests/fixtures/project_failing_strict_validation/pyproject.toml b/tests/fixtures/project_failing_strict_validation/pyproject.toml new file mode 100644 index 0000000..6d282ba --- /dev/null +++ b/tests/fixtures/project_failing_strict_validation/pyproject.toml @@ -0,0 +1,12 @@ +[tool.poetry] +readme = ["README.rst", "README_WITH_ANOTHER_EXTENSION.md"] + +[tool.poetry.dependencies] +python = "*" +pathlib2 = { version = "^2.2", python = "3.7", allows-prereleases = true } + +[tool.poetry.scripts] +a_script_with_unknown_extra = { reference = "a_script_with_unknown_extra.py", type = "file", extras = ["foo"] } +a_script_without_extras = { reference = "a_script_without_extras.py", type = "file" } +a_script_with_empty_extras = { reference = "a_script_with_empty_extras.py", type = "file", extras = [] } +another_script = "another_script:main" diff --git a/tests/fixtures/project_with_build_system_requires/pyproject.toml b/tests/fixtures/project_with_build_system_requires/pyproject.toml new file mode 100644 index 0000000..3485c42 --- /dev/null +++ b/tests/fixtures/project_with_build_system_requires/pyproject.toml @@ -0,0 +1,22 @@ +[build-system] +requires = [ + "poetry-core", + "Cython~=0.29.6", +] +build-backend = "poetry.core.masonry.api" + +[tool.poetry] +name = "poetry-cython-example" +version = "0.1.0" +description = "" +authors = [] +include = [{ path = "project/**/*.so", format = "wheel" }] + +[tool.poetry.build] +generate-setup-file = false +script = "build.py" + +[tool.poetry.dependencies] +python = "^3.7" + +[tool.poetry.dev-dependencies] diff --git a/tests/fixtures/project_with_groups_and_explicit_main/README.rst b/tests/fixtures/project_with_groups_and_explicit_main/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/fixtures/project_with_groups_and_explicit_main/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/fixtures/project_with_groups_and_explicit_main/pyproject.toml b/tests/fixtures/project_with_groups_and_explicit_main/pyproject.toml new file mode 100644 index 0000000..18c6a59 --- /dev/null +++ b/tests/fixtures/project_with_groups_and_explicit_main/pyproject.toml @@ -0,0 +1,17 @@ +[tool.poetry] +name = "simple-project" +version = "0.1.0" +description = "" +authors = ["Your Name "] + +[tool.poetry.dependencies] +python = "^3.7" + +[tool.poetry.group.main.dependencies] +aiohttp = "^2.17.0" + +[tools.poetry] + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" diff --git a/tests/fixtures/project_with_groups_and_explicit_main/simple_project/__init__.py b/tests/fixtures/project_with_groups_and_explicit_main/simple_project/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/fixtures/project_with_groups_and_legacy_dev/README.rst b/tests/fixtures/project_with_groups_and_legacy_dev/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/fixtures/project_with_groups_and_legacy_dev/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/fixtures/project_with_groups_and_legacy_dev/pyproject.toml b/tests/fixtures/project_with_groups_and_legacy_dev/pyproject.toml new file mode 100644 index 0000000..6e239c7 --- /dev/null +++ b/tests/fixtures/project_with_groups_and_legacy_dev/pyproject.toml @@ -0,0 +1,20 @@ +[tool.poetry] +name = "simple-project" +version = "0.1.0" +description = "" +authors = ["Your Name "] + +[tool.poetry.dependencies] +python = 
"^3.7" + +[tool.poetry.group.dev.dependencies] +pre-commit = "^2.17.0" + +[tool.poetry.dev-dependencies] +pytest = "^5.2" + +[tools.poetry] + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" diff --git a/tests/fixtures/project_with_groups_and_legacy_dev/simple_project/__init__.py b/tests/fixtures/project_with_groups_and_legacy_dev/simple_project/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/fixtures/project_with_invalid_dev_deps/pyproject.toml b/tests/fixtures/project_with_invalid_dev_deps/pyproject.toml new file mode 100644 index 0000000..1d0b5a8 --- /dev/null +++ b/tests/fixtures/project_with_invalid_dev_deps/pyproject.toml @@ -0,0 +1,13 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." +authors = ["Awesome Hacker "] +license = "MIT" + +[tool.poetry.dependencies] + +[tool.poetry.extras] + +[tool.poetry.dev-dependencies] +mylib = { path = "../mylib", develop = true} diff --git a/tests/fixtures/project_with_markers_and_extras/project/__init__.py b/tests/fixtures/project_with_markers_and_extras/project/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/fixtures/project_with_markers_and_extras/pyproject.toml b/tests/fixtures/project_with_markers_and_extras/pyproject.toml new file mode 100644 index 0000000..a945b07 --- /dev/null +++ b/tests/fixtures/project_with_markers_and_extras/pyproject.toml @@ -0,0 +1,20 @@ +[tool.poetry] +name = "project-with-markers-and-extras" +version = "1.2.3" +description = "This is a description" +authors = ["Your Name "] +license = "MIT" + +packages = [ + {include = "project"} +] + +[tool.poetry.dependencies] +python = "*" +orjson = [ + { url = "https://example/location/orjson-3.8.0-cp310-cp310-manylinux_2_28_x86_64.whl", platform = "linux", optional = true }, + { url = "https://example/location/orjson-3.8.0-cp310-cp310-macosx_10_9_x86_64.macosx_11_0_arm64.macosx_10_9_universal2.whl", platform = "darwin", optional = true } +] + +[tool.poetry.extras] +all = ["orjson"] diff --git a/tests/fixtures/project_with_multi_constraints_dependency/project/__init__.py b/tests/fixtures/project_with_multi_constraints_dependency/project/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/fixtures/project_with_multi_constraints_dependency/pyproject.toml b/tests/fixtures/project_with_multi_constraints_dependency/pyproject.toml new file mode 100644 index 0000000..69462a6 --- /dev/null +++ b/tests/fixtures/project_with_multi_constraints_dependency/pyproject.toml @@ -0,0 +1,19 @@ +[tool.poetry] +name = "project-with-multi-constraints-dependency" +version = "1.2.3" +description = "This is a description" +authors = ["Your Name "] +license = "MIT" + +packages = [ + {include = "project"} +] + +[tool.poetry.dependencies] +python = "*" +pendulum = [ + { version = "^1.5", python = "<3.4" }, + { version = "^2.0", python = "^3.4" } +] + +[tool.poetry.dev-dependencies] diff --git a/tests/fixtures/project_with_pep517_non_poetry/pyproject.toml b/tests/fixtures/project_with_pep517_non_poetry/pyproject.toml new file mode 100644 index 0000000..8b36a60 --- /dev/null +++ b/tests/fixtures/project_with_pep517_non_poetry/pyproject.toml @@ -0,0 +1,13 @@ +[build-system] +requires = ["flit_core >=3.7.1,<4"] +build-backend = "flit_core.buildapi" + +[project] +name = "flit" +authors = [] +dependencies = [ + "flit_core >=3.7.1", +] +requires-python = ">=3.6" +readme = "README.rst" +dynamic = ['version', 'description'] diff --git 
a/tests/fixtures/project_with_setup/my_package/__init__.py b/tests/fixtures/project_with_setup/my_package/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/fixtures/project_with_setup/setup.py b/tests/fixtures/project_with_setup/setup.py new file mode 100644 index 0000000..ce86fe3 --- /dev/null +++ b/tests/fixtures/project_with_setup/setup.py @@ -0,0 +1,14 @@ +from setuptools import setup + + +setup( + name="my-package", + license="MIT", + version="0.1.2", + description="Demo project.", + author="Sébastien Eustace", + author_email="sebastien@eustace.io", + url="https://github.com/demo/demo", + packages=["my_package"], + install_requires=["pendulum>=1.4.4", "cachy[msgpack]>=0.2.0"], +) diff --git a/tests/fixtures/project_with_setup_cfg_only/setup.cfg b/tests/fixtures/project_with_setup_cfg_only/setup.cfg new file mode 100644 index 0000000..b0f4352 --- /dev/null +++ b/tests/fixtures/project_with_setup_cfg_only/setup.cfg @@ -0,0 +1,18 @@ +[metadata] +name = my_package +version = attr: my_package.VERSION +description = My package description +long_description = file: README.rst, CHANGELOG.rst, LICENSE.rst +keywords = one, two +license = BSD 3-Clause License +classifiers = + Framework :: Django + Programming Language :: Python :: 3 + +[options] +zip_safe = False +include_package_data = True +packages = find: +install_requires = + requests + importlib-metadata; python_version<"3.8" diff --git a/tests/fixtures/sample_project/README.rst b/tests/fixtures/sample_project/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/fixtures/sample_project/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/fixtures/sample_project/pyproject.toml b/tests/fixtures/sample_project/pyproject.toml new file mode 100644 index 0000000..76712c6 --- /dev/null +++ b/tests/fixtures/sample_project/pyproject.toml @@ -0,0 +1,71 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." +authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org" +repository = "https://github.com/python-poetry/poetry" +documentation = "https://python-poetry.org/docs" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +# Requirements +[tool.poetry.dependencies] +python = "~2.7 || ^3.6" +cleo = "^0.6" +pendulum = { git = "https://github.com/sdispater/pendulum.git", branch = "2.0" } +tomlkit = { git = "https://github.com/sdispater/tomlkit.git", rev = "3bff550", develop = false } +requests = { version = "^2.18", optional = true, extras=[ "security" ] } +pathlib2 = { version = "^2.2", python = "~2.7" } + +orator = { version = "^0.9", optional = true } + +# File dependency +demo = { path = "../distributions/demo-0.1.0-py2.py3-none-any.whl" } + +# Dir dependency with setup.py +my-package = { path = "../project_with_setup/" } + +# Dir dependency with pyproject.toml +simple-project = { path = "../simple_project/" } + +# Dependency with markers +functools32 = { version = "^3.2.3", markers = "python_version ~= '2.7' and sys_platform == 'win32' or python_version in '3.4 3.5'" } + +# Dependency with python constraint +dataclasses = {version = "^0.7", python = ">=3.6.1,<3.7"} + + +[tool.poetry.extras] +db = [ "orator" ] + +# Non-regression test for https://github.com/python-poetry/poetry-core/pull/492. 
+# The underlying issue occurred because `tomlkit` can either return a TOML table as `Table` instance or an +# `OutOfOrderProxy` one, if a table is discontinuous and multiple sections of a table are separated by a non-related +# table, but we were too strict in our type check assertions. +# So adding `tool.black` here ensure that we have discontinuous tables, so that we don't re-introduce the issue caused +# by the type check assertion that ended up being reverted. +[tool.black] +preview = true + +[tool.poetry.group.dev.dependencies] +pytest = "~3.4" + + +[tool.poetry.scripts] +my-script = "my_package:main" + + +[tool.poetry.plugins."blogtool.parsers"] +".rst" = "some_module::SomeClass" diff --git a/tests/fixtures/script-files/sample_script.py b/tests/fixtures/script-files/sample_script.py new file mode 100644 index 0000000..1318d50 --- /dev/null +++ b/tests/fixtures/script-files/sample_script.py @@ -0,0 +1,6 @@ +#!/usr/bin/env python + +from __future__ import annotations + + +hello = "Hello World!" diff --git a/tests/fixtures/script-files/sample_script.sh b/tests/fixtures/script-files/sample_script.sh new file mode 100644 index 0000000..d6954d9 --- /dev/null +++ b/tests/fixtures/script-files/sample_script.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +echo "Hello World!" diff --git a/tests/fixtures/simple_project/README.rst b/tests/fixtures/simple_project/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/fixtures/simple_project/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/fixtures/simple_project/dist/simple_project-1.2.3-py2.py3-none-any.whl b/tests/fixtures/simple_project/dist/simple_project-1.2.3-py2.py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..fcdeda3133860f7a41040e0953be83034d01145f GIT binary patch literal 1320 zcmWIWW@Zs#U|`??VnrZkU|<513=C30x;QhpASX4xpeR2pHMvATK0Y%qvm`!Vub`5d zAplvYM@^pYvv8n}NFY|h(5Y*vXQXGWmy%grqMMnQmaiY~;p*zcd&-fkK|z4^!rtW{ zn3SWe^#$H4ExN`wpDW4DEwvm`6-N>XJOp~G}oyk*lXPObG5#*WWk@{`yV1s9qYzEQ?elykxsK41Y&2W}A^T~6C!g167 z-2T=U|EnbV`-0v7x7-e>Q;lR_)3n9x?aAqvLVb6w>AN5OI>2z6{y8_TwB(}*Z(W==luI(_)Q!&nz;>Z%q|0?*9aKZs)VCC$ko|D$ffn% z-l8T028IWB(>(=}7aEj2Q1@DSfk}{WnvBPUB~6x0wy!Uj7rl7GUGrVj?gaPZqFr2N zv#wA1H1YhDtWV-A_PMqF{PyY9V;3&%g3VGvpX@{~aM*5B(fKRaV=*Z)@a^{6ZEyM< z;#bzo-O2E{ouwOY{3N+NR-Exzdwb7wySHxP5`Hn;uGXHK-nC<=#2e^+p@?tBR)>y!lzn9LWgCBBW%jf$D$`EwD8_v2D|n5o-6X;SpSZ5 z!^O*$U3<@8VNdu9O#F;YBFwmRG%#quU`Zp0!kxd-%)!Xt5K|Z!8W^oG%)y@9(M?3p ymusw3=9CGeCVeD literal 0 HcmV?d00001 diff --git a/tests/fixtures/simple_project/dist/simple_project-1.2.3.tar.gz b/tests/fixtures/simple_project/dist/simple_project-1.2.3.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..149aa9527c577a1828a5a3114377960fd1e41a88 GIT binary patch literal 1106 zcmV-Y1g-lYiwFpPU6EV@|8r?=aBO8QaB^>IWn*+LF)lJLGcI&tascgF+j84B5cM;@ z0%ac9YD9<^QSxxAOx)UOJh2ju<@Uu^IW!5&s7ZnaKqs1{{hxkEf2;s6x;gSxX{=6c z?u&rME{N;y!4c}9&Wlm-{uK@)%=R8c%u+V}nx@lTl$GAHx~{zk-|y`}axMZUKmVmC z`IU&h4--KWJg{8b?OS%&>7m}fZS}2vu-C|=7fzBa#zw~IZ#Wc&g>2+>PLB=_hewEU z@x|_cuiM>x|Lxw|{qNen-X1iY`~ToFoWW5L-UL$&26Jg{{*w{ZPpUtW|5;Tb)I{#aiWv|_T*R{+h|L;Q11*I{{C>Crs0cnt6IDp#f zsnEbJW}MJeH5DgB0}VkiFL0?sgg)261jHo7DK7>yJU-I^U@}3KgIl0eslJvU&LnzK4c9_s-0O78*`P&_ zn)J>nCxWusvf-47>--XhH0kh&@GKCRRo-%jG|Us6ilCn3#_l`Pl*_T);MqIMqGHNg zR-h`9PPI1FBAnqg!f7~DwdxBK0Kz!noLmvSyiR8{BO&xWIHp(PUBECr#J4!6nHr7K z|B#b7f@gB$xYqucJ}2ao1&m;>Dx;a)niLLcl*gE>>`Ahv9$L z%T~W{^&9;6|Ls2(dG@)x{TcZG&i;4hQ?%LtUC1+NvRf|b-_g~IU>1V<%WIRYAtWvcMV2(jQM}``X}S~ 
z&C9ntivO;){{N5DH?1cA??Q%H1S)bEzm&1ngEq3j8ySW@xOz6g+@d%nV4NpOz-Eea zSPQq{mDCuDmGR)lI0p`jpefz&gw?9aDB`Ic#wVgFcwKQVb^Md*tG!@Z+U4g!#c6P( zWIR0hAHpE`aZc7KI;EtiJP*DdAH#!8F48TIRDF1SRyVJ>j2NA=AW7s%;YpBAbH!RM z$QW6&x3x$8Ej^rV?NM)QkJT}4LuPH~cn_lmMpZcb`Siqths#7>8{SXjt-7AhX~Tuz YJ)~ii4I62sk*`Sp0Y" +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org" +repository = "https://github.com/python-poetry/poetry" +documentation = "https://python-poetry.org/docs" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +# Requirements +[tool.poetry.dependencies] +python = "~2.7 || ^3.4" diff --git a/tests/fixtures/simple_project/simple_project/__init__.py b/tests/fixtures/simple_project/simple_project/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/fixtures/with_readme_files/README-1.rst b/tests/fixtures/with_readme_files/README-1.rst new file mode 100644 index 0000000..265d70d --- /dev/null +++ b/tests/fixtures/with_readme_files/README-1.rst @@ -0,0 +1,2 @@ +Single Python +============= diff --git a/tests/fixtures/with_readme_files/README-2.rst b/tests/fixtures/with_readme_files/README-2.rst new file mode 100644 index 0000000..a5693d9 --- /dev/null +++ b/tests/fixtures/with_readme_files/README-2.rst @@ -0,0 +1,2 @@ +Changelog +========= diff --git a/tests/fixtures/with_readme_files/pyproject.toml b/tests/fixtures/with_readme_files/pyproject.toml new file mode 100644 index 0000000..850e511 --- /dev/null +++ b/tests/fixtures/with_readme_files/pyproject.toml @@ -0,0 +1,19 @@ +[tool.poetry] +name = "single-python" +version = "0.1" +description = "Some description." +authors = [ + "Wagner Macedo " +] +license = "MIT" + +readme = [ + "README-1.rst", + "README-2.rst" +] + +homepage = "https://python-poetry.org/" + + +[tool.poetry.dependencies] +python = "2.7.15" diff --git a/tests/fixtures/with_readme_files/single_python.py b/tests/fixtures/with_readme_files/single_python.py new file mode 100644 index 0000000..ceb22ae --- /dev/null +++ b/tests/fixtures/with_readme_files/single_python.py @@ -0,0 +1,6 @@ +"""Example module""" + +from __future__ import annotations + + +__version__ = "0.1" diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/integration/test_pep517.py b/tests/integration/test_pep517.py new file mode 100644 index 0000000..0706112 --- /dev/null +++ b/tests/integration/test_pep517.py @@ -0,0 +1,98 @@ +from __future__ import annotations + +import sys + +from pathlib import Path +from typing import TYPE_CHECKING + +import pytest + +# noinspection PyProtectedMember +from build.__main__ import build_package +from build.util import project_wheel_metadata + +from tests.testutils import subprocess_run +from tests.testutils import temporary_project_directory + + +if TYPE_CHECKING: + from _pytest.fixtures import FixtureRequest + +pytestmark = pytest.mark.integration + + +@pytest.mark.parametrize( + "getter, project", + [ + ("common_project", "simple_project"), + ("masonry_project", "src_extended"), + ("masonry_project", "disable_setup_py"), + ], +) +def test_pep517_check_poetry_managed( + request: FixtureRequest, getter: str, project: str +) -> None: + with temporary_project_directory(request.getfixturevalue(getter)(project)) as path: + assert project_wheel_metadata(path) + + +def test_pep517_check(project_source_root: Path) -> None: + 
assert project_wheel_metadata(str(project_source_root)) + + +def test_pep517_build_sdist( + temporary_directory: Path, project_source_root: Path +) -> None: + build_package( + srcdir=str(project_source_root), + outdir=str(temporary_directory), + distributions=["sdist"], + ) + distributions = list(temporary_directory.glob("poetry_core-*.tar.gz")) + assert len(distributions) == 1 + + +def test_pep517_build_wheel( + temporary_directory: Path, project_source_root: Path +) -> None: + build_package( + srcdir=str(project_source_root), + outdir=str(temporary_directory), + distributions=["wheel"], + ) + distributions = list(temporary_directory.glob("poetry_core-*-none-any.whl")) + assert len(distributions) == 1 + + +def test_pip_wheel_build(temporary_directory: Path, project_source_root: Path) -> None: + tmp = str(temporary_directory) + pip = subprocess_run( + "pip", "wheel", "--use-pep517", "-w", tmp, str(project_source_root) + ) + assert "Successfully built poetry-core" in pip.stdout + + assert pip.returncode == 0 + + wheels = list(Path(tmp).glob("poetry_core-*-none-any.whl")) + assert len(wheels) == 1 + + +@pytest.mark.xfail( + sys.version_info < (3, 8), + # see https://github.com/python/importlib_metadata/issues/392 + reason="importlib-metadata can't be installed with --no-binary anymore", + strict=True, +) +def test_pip_install_no_binary(python: str, project_source_root: Path) -> None: + subprocess_run( + python, + "-m", + "pip", + "install", + "--no-binary", + ":all:", + project_source_root.as_posix(), + ) + + pip_show = subprocess_run(python, "-m", "pip", "show", "poetry-core") + assert "Name: poetry-core" in pip_show.stdout diff --git a/tests/integration/test_pep517_backend.py b/tests/integration/test_pep517_backend.py new file mode 100644 index 0000000..c476c65 --- /dev/null +++ b/tests/integration/test_pep517_backend.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +import shutil + +from pathlib import Path + +import pytest + +from tests.testutils import subprocess_run + + +pytestmark = pytest.mark.integration + + +BUILD_SYSTEM_TEMPLATE = """ +[build-system] +requires = ["poetry-core @ file://{project_path}"] +build-backend = "poetry.core.masonry.api" +""" + + +def test_pip_install( + temporary_directory: Path, project_source_root: Path, python: str +) -> None: + """ + Ensure that a project using the repository version of poetry-core as a PEP 517 backend can be built. + """ + temp_pep_517_backend_path = temporary_directory / "pep_517_backend" + + # Copy `pep_517_backend` to a temporary directory as we need to dynamically add the + # build system during the test. This ensures that we don't update the source, since + # the value of `requires` is dynamic. + shutil.copytree( + Path(__file__).parent.parent / "fixtures/pep_517_backend", + temp_pep_517_backend_path, + ) + + # Append dynamic `build-system` section to `pyproject.toml` in the temporary + # project directory. 
+ with open(temp_pep_517_backend_path / "pyproject.toml", "a") as f: + f.write( + BUILD_SYSTEM_TEMPLATE.format(project_path=project_source_root.as_posix()) + ) + + subprocess_run( + python, + "-m", + "pip", + "install", + temp_pep_517_backend_path.as_posix(), + ) + + pip_show = subprocess_run(python, "-m", "pip", "show", "foo") + assert "Name: foo" in pip_show.stdout diff --git a/tests/json/__init__.py b/tests/json/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/json/test_poetry_schema.py b/tests/json/test_poetry_schema.py new file mode 100644 index 0000000..06f5e34 --- /dev/null +++ b/tests/json/test_poetry_schema.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +from typing import Any + +import pytest + +from poetry.core.json import validate_object + + +@pytest.fixture +def base_object() -> dict[str, Any]: + return { + "name": "myapp", + "version": "1.0.0", + "description": "Some description.", + "authors": ["Your Name "], + "dependencies": {"python": "^3.6"}, + "dev-dependencies": {}, + } + + +@pytest.fixture +def multi_url_object() -> dict[str, Any]: + return { + "name": "myapp", + "version": "1.0.0", + "description": "Some description.", + "authors": ["Your Name "], + "dependencies": { + "python": [ + { + "url": "https://download.pytorch.org/whl/cpu/torch-1.4.0%2Bcpu-cp37-cp37m-linux_x86_64.whl", + "platform": "linux", + }, + {"path": "../foo", "platform": "darwin"}, + ] + }, + "dev-dependencies": {}, + } + + +def test_path_dependencies(base_object: dict[str, Any]) -> None: + base_object["dependencies"].update({"foo": {"path": "../foo"}}) + base_object["dev-dependencies"].update({"foo": {"path": "../foo"}}) + + assert len(validate_object(base_object, "poetry-schema")) == 0 + + +def test_multi_url_dependencies(multi_url_object: dict[str, Any]) -> None: + assert len(validate_object(multi_url_object, "poetry-schema")) == 0 + + +def test_multiline_description(base_object: dict[str, Any]) -> None: + bad_description = "Some multi-\nline string" + base_object["description"] = bad_description + + errors = validate_object(base_object, "poetry-schema") + assert len(errors) == 1 + assert errors[0] == f"[description] {bad_description!r} does not match '^[^\\n]*$'" diff --git a/tests/masonry/__init__.py b/tests/masonry/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/__init__.py b/tests/masonry/builders/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/build_script_in_subdir/pyproject.toml b/tests/masonry/builders/fixtures/build_script_in_subdir/pyproject.toml new file mode 100644 index 0000000..86dcc30 --- /dev/null +++ b/tests/masonry/builders/fixtures/build_script_in_subdir/pyproject.toml @@ -0,0 +1,14 @@ +[tool.poetry] +name = "build_script_in_subdir" +version = "0.1" +description = "Some description." 
+authors = [ + "Brandon Chinn " +] +license = "MIT" +homepage = "https://python-poetry.org/" +packages = [ + { include = "*", from = "src" }, +] + +build = "scripts/build.py" diff --git a/tests/masonry/builders/fixtures/build_script_in_subdir/scripts/build.py b/tests/masonry/builders/fixtures/build_script_in_subdir/scripts/build.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/build_script_in_subdir/src/foo.py b/tests/masonry/builders/fixtures/build_script_in_subdir/src/foo.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/case_sensitive_exclusions/LICENSE b/tests/masonry/builders/fixtures/case_sensitive_exclusions/LICENSE new file mode 100644 index 0000000..44cf2b3 --- /dev/null +++ b/tests/masonry/builders/fixtures/case_sensitive_exclusions/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2018 Sébastien Eustace + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
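The schema tests added above (tests/json/test_poetry_schema.py) drive poetry.core.json.validate_object, which returns a list of human-readable error strings and an empty list when the document is valid. As a minimal sketch of calling the same helper outside pytest — the document dict below is purely illustrative and simply mirrors the base_object fixture above:

from __future__ import annotations

from typing import Any

from poetry.core.json import validate_object

# Illustrative [tool.poetry]-style document, mirroring the base_object fixture above.
document: dict[str, Any] = {
    "name": "myapp",
    "version": "1.0.0",
    "description": "Some description.",
    "authors": ["Your Name <you@example.com>"],
    "dependencies": {"python": "^3.6"},
    "dev-dependencies": {},
}

errors = validate_object(document, "poetry-schema")
if errors:
    # Each entry is a readable message, e.g. "[description] ... does not match '^[^\\n]*$'".
    for error in errors:
        print(error)
else:
    print("document is valid")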
diff --git a/tests/masonry/builders/fixtures/case_sensitive_exclusions/README.rst b/tests/masonry/builders/fixtures/case_sensitive_exclusions/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/masonry/builders/fixtures/case_sensitive_exclusions/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/Foo/Bar.py b/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/Foo/Bar.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/Foo/IncludedBar.py b/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/Foo/IncludedBar.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/Foo/SecondBar.py b/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/Foo/SecondBar.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/Foo/lowercasebar.py b/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/Foo/lowercasebar.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/FooBar/Bar.py b/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/FooBar/Bar.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/FooBar/lowercasebar.py b/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/FooBar/lowercasebar.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/__init__.py b/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/__init__.py new file mode 100644 index 0000000..10aa336 --- /dev/null +++ b/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/__init__.py @@ -0,0 +1 @@ +__version__ = "1.2.3" diff --git a/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/bar/CapitalFoo.py b/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/bar/CapitalFoo.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/bar/foo.py b/tests/masonry/builders/fixtures/case_sensitive_exclusions/my_package/bar/foo.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/case_sensitive_exclusions/pyproject.toml b/tests/masonry/builders/fixtures/case_sensitive_exclusions/pyproject.toml new file mode 100644 index 0000000..3bf793e --- /dev/null +++ b/tests/masonry/builders/fixtures/case_sensitive_exclusions/pyproject.toml @@ -0,0 +1,49 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." 
+authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" +repository = "https://github.com/python-poetry/poetry" +documentation = "https://python-poetry.org/docs" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +exclude = [ + "**/SecondBar.py", + "my_package/FooBar/*", + "my_package/Foo/Bar.py", + "my_package/Foo/lowercasebar.py", + "my_package/bar/foo.py", + "my_package/bar/CapitalFoo.py" +] + +# Requirements +[tool.poetry.dependencies] +python = "^3.6" +cleo = "^0.6" +cachy = { version = "^0.2.0", extras = ["msgpack"] } + +pendulum = { version = "^1.4", optional = true } + +[tool.poetry.dev-dependencies] +pytest = "~3.4" + +[tool.poetry.extras] +time = ["pendulum"] + +[tool.poetry.scripts] +my-script = "my_package:main" +my-2nd-script = "my_package:main2" +extra-script = {reference = "my_package.extra:main", extras = ["time"], type = "console"} diff --git a/tests/masonry/builders/fixtures/comma_file/comma_file/__init__.py b/tests/masonry/builders/fixtures/comma_file/comma_file/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/comma_file/comma_file/a,b.py b/tests/masonry/builders/fixtures/comma_file/comma_file/a,b.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/comma_file/pyproject.toml b/tests/masonry/builders/fixtures/comma_file/pyproject.toml new file mode 100644 index 0000000..110845e --- /dev/null +++ b/tests/masonry/builders/fixtures/comma_file/pyproject.toml @@ -0,0 +1,12 @@ +[tool.poetry] +name = "comma-file" +version = "1.2.3" +description = "Some description." +authors = [ + "Sébastien Eustace " +] +[tool.poetry.dependencies] +python = "^3.6" + +[tool.poetry.dev-dependencies] + diff --git a/tests/masonry/builders/fixtures/complete/LICENSE b/tests/masonry/builders/fixtures/complete/LICENSE new file mode 100644 index 0000000..44cf2b3 --- /dev/null +++ b/tests/masonry/builders/fixtures/complete/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2018 Sébastien Eustace + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
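The case_sensitive_exclusions fixture above pairs an exclude list with mixed-case module paths. A rough sketch of building its wheel and eyeballing the result follows; it assumes the Factory/WheelBuilder entry points used by the builder tests elsewhere in this suite, and a fixture path relative to a poetry-core checkout:

from __future__ import annotations

import zipfile

from pathlib import Path

from poetry.core.factory import Factory
from poetry.core.masonry.builders.wheel import WheelBuilder

# Assumed location of the fixture relative to a poetry-core checkout.
fixture = Path("tests/masonry/builders/fixtures/case_sensitive_exclusions")

# Assumed: WheelBuilder.make() writes the wheel into the project's dist/ directory.
WheelBuilder.make(Factory().create_poetry(fixture))

wheel = next((fixture / "dist").glob("my_package-1.2.3-*.whl"))
with zipfile.ZipFile(wheel) as whl:
    # Paths matched by `exclude` (SecondBar.py, FooBar/*, ...) should be absent,
    # while IncludedBar.py and the rest of the package should remain.
    for name in sorted(whl.namelist()):
        print(name)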
diff --git a/tests/masonry/builders/fixtures/complete/README.rst b/tests/masonry/builders/fixtures/complete/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/masonry/builders/fixtures/complete/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/masonry/builders/fixtures/complete/bin/script.sh b/tests/masonry/builders/fixtures/complete/bin/script.sh new file mode 100644 index 0000000..2a9686a --- /dev/null +++ b/tests/masonry/builders/fixtures/complete/bin/script.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +echo "Hello World!" \ No newline at end of file diff --git a/tests/masonry/builders/fixtures/complete/my_package/__init__.py b/tests/masonry/builders/fixtures/complete/my_package/__init__.py new file mode 100644 index 0000000..10aa336 --- /dev/null +++ b/tests/masonry/builders/fixtures/complete/my_package/__init__.py @@ -0,0 +1 @@ +__version__ = "1.2.3" diff --git a/tests/masonry/builders/fixtures/complete/my_package/data1/test.json b/tests/masonry/builders/fixtures/complete/my_package/data1/test.json new file mode 100644 index 0000000..0967ef4 --- /dev/null +++ b/tests/masonry/builders/fixtures/complete/my_package/data1/test.json @@ -0,0 +1 @@ +{} diff --git a/tests/masonry/builders/fixtures/complete/my_package/sub_pkg1/__init__.py b/tests/masonry/builders/fixtures/complete/my_package/sub_pkg1/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/complete/my_package/sub_pkg1/extra_file.xml b/tests/masonry/builders/fixtures/complete/my_package/sub_pkg1/extra_file.xml new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/complete/my_package/sub_pkg2/__init__.py b/tests/masonry/builders/fixtures/complete/my_package/sub_pkg2/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/complete/my_package/sub_pkg2/data2/data.json b/tests/masonry/builders/fixtures/complete/my_package/sub_pkg2/data2/data.json new file mode 100644 index 0000000..0967ef4 --- /dev/null +++ b/tests/masonry/builders/fixtures/complete/my_package/sub_pkg2/data2/data.json @@ -0,0 +1 @@ +{} diff --git a/tests/masonry/builders/fixtures/complete/my_package/sub_pkg3/foo.py b/tests/masonry/builders/fixtures/complete/my_package/sub_pkg3/foo.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/complete/pyproject.toml b/tests/masonry/builders/fixtures/complete/pyproject.toml new file mode 100644 index 0000000..8b7d2c6 --- /dev/null +++ b/tests/masonry/builders/fixtures/complete/pyproject.toml @@ -0,0 +1,56 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." 
+authors = [ + "Sébastien Eustace " +] +maintainers = [ + "People Everywhere " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" +repository = "https://github.com/python-poetry/poetry" +documentation = "https://python-poetry.org/docs" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +exclude = [ + "does-not-exist", + "**/*.xml" +] + +# Requirements +[tool.poetry.dependencies] +python = "^3.6" +cleo = "^0.6" +cachy = { version = "^0.2.0", extras = ["msgpack"] } + +[tool.poetry.dependencies.pendulum] +version = "^1.4" +markers = 'python_version ~= "2.7" and sys_platform == "win32" or python_version in "3.4 3.5"' +optional = true + +[tool.poetry.dev-dependencies] +pytest = "~3.4" + +[tool.poetry.extras] +time = ["pendulum"] + +[tool.poetry.scripts] +my-script = "my_package:main" +my-2nd-script = "my_package:main2" +file-script = { reference = "bin/script.sh", type = "file" } +extra-script = { reference = "my_package.extra:main", extras = ["time"], type = "console" } + + +[tool.poetry.urls] +"Issue Tracker" = "https://github.com/python-poetry/poetry/issues" diff --git a/tests/masonry/builders/fixtures/default_src_with_excluded_data/LICENSE b/tests/masonry/builders/fixtures/default_src_with_excluded_data/LICENSE new file mode 100644 index 0000000..44cf2b3 --- /dev/null +++ b/tests/masonry/builders/fixtures/default_src_with_excluded_data/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2018 Sébastien Eustace + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/tests/masonry/builders/fixtures/default_src_with_excluded_data/README.rst b/tests/masonry/builders/fixtures/default_src_with_excluded_data/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/masonry/builders/fixtures/default_src_with_excluded_data/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/masonry/builders/fixtures/default_src_with_excluded_data/pyproject.toml b/tests/masonry/builders/fixtures/default_src_with_excluded_data/pyproject.toml new file mode 100644 index 0000000..80e8616 --- /dev/null +++ b/tests/masonry/builders/fixtures/default_src_with_excluded_data/pyproject.toml @@ -0,0 +1,39 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." 
+authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" +repository = "https://github.com/python-poetry/poetry" +documentation = "https://python-poetry.org/docs" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +# Requirements +[tool.poetry.dependencies] +python = "^3.6" +cleo = "^0.6" +cachy = { version = "^0.2.0", extras = ["msgpack"] } + +pendulum = { version = "^1.4", optional = true } + +[tool.poetry.dev-dependencies] +pytest = "~3.4" + +[tool.poetry.extras] +time = ["pendulum"] + +[tool.poetry.scripts] +my-script = "my_package:main" +my-2nd-script = "my_package:main2" diff --git a/tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/__init__.py b/tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/data/data1.txt b/tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/data/data1.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/data/sub_data/data2.txt b/tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/data/sub_data/data2.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/data/sub_data/data3.txt b/tests/masonry/builders/fixtures/default_src_with_excluded_data/src/my_package/data/sub_data/data3.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/default_with_excluded_data/LICENSE b/tests/masonry/builders/fixtures/default_with_excluded_data/LICENSE new file mode 100644 index 0000000..44cf2b3 --- /dev/null +++ b/tests/masonry/builders/fixtures/default_with_excluded_data/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2018 Sébastien Eustace + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
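The default_src_with_excluded_data and default_with_excluded_data fixtures above exist to check that data files pruned by VCS ignores or exclude rules stay out of the built archives. As a rough illustration only — assuming SdistBuilder behaves as the builder tests elsewhere in this suite expect, writing the archive into the project's dist/ directory by default — one could inspect the sdist contents like this:

from __future__ import annotations

import tarfile

from pathlib import Path

from poetry.core.factory import Factory
from poetry.core.masonry.builders.sdist import SdistBuilder

# Assumed fixture location relative to a poetry-core checkout.
fixture = Path("tests/masonry/builders/fixtures/default_with_excluded_data")

# Assumed: build() places the archive in the fixture's dist/ directory by default.
SdistBuilder(Factory().create_poetry(fixture)).build()

sdist = next((fixture / "dist").glob("*-1.2.3.tar.gz"))
with tarfile.open(sdist) as tar:
    # Data files excluded by the fixture's rules should not be listed here.
    for member in sorted(tar.getnames()):
        print(member)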
diff --git a/tests/masonry/builders/fixtures/default_with_excluded_data/README.rst b/tests/masonry/builders/fixtures/default_with_excluded_data/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/masonry/builders/fixtures/default_with_excluded_data/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/masonry/builders/fixtures/default_with_excluded_data/my_package/__init__.py b/tests/masonry/builders/fixtures/default_with_excluded_data/my_package/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/default_with_excluded_data/my_package/data/data1.txt b/tests/masonry/builders/fixtures/default_with_excluded_data/my_package/data/data1.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/default_with_excluded_data/my_package/data/sub_data/data2.txt b/tests/masonry/builders/fixtures/default_with_excluded_data/my_package/data/sub_data/data2.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/default_with_excluded_data/my_package/data/sub_data/data3.txt b/tests/masonry/builders/fixtures/default_with_excluded_data/my_package/data/sub_data/data3.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/default_with_excluded_data/pyproject.toml b/tests/masonry/builders/fixtures/default_with_excluded_data/pyproject.toml new file mode 100644 index 0000000..80e8616 --- /dev/null +++ b/tests/masonry/builders/fixtures/default_with_excluded_data/pyproject.toml @@ -0,0 +1,39 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." +authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" +repository = "https://github.com/python-poetry/poetry" +documentation = "https://python-poetry.org/docs" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +# Requirements +[tool.poetry.dependencies] +python = "^3.6" +cleo = "^0.6" +cachy = { version = "^0.2.0", extras = ["msgpack"] } + +pendulum = { version = "^1.4", optional = true } + +[tool.poetry.dev-dependencies] +pytest = "~3.4" + +[tool.poetry.extras] +time = ["pendulum"] + +[tool.poetry.scripts] +my-script = "my_package:main" +my-2nd-script = "my_package:main2" diff --git a/tests/masonry/builders/fixtures/default_with_excluded_data_toml/LICENSE b/tests/masonry/builders/fixtures/default_with_excluded_data_toml/LICENSE new file mode 100644 index 0000000..44cf2b3 --- /dev/null +++ b/tests/masonry/builders/fixtures/default_with_excluded_data_toml/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2018 Sébastien Eustace + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/tests/masonry/builders/fixtures/default_with_excluded_data_toml/README.rst b/tests/masonry/builders/fixtures/default_with_excluded_data_toml/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/masonry/builders/fixtures/default_with_excluded_data_toml/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/__init__.py b/tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/data/data1.txt b/tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/data/data1.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/data/sub_data/data2.txt b/tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/data/sub_data/data2.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/data/sub_data/data3.txt b/tests/masonry/builders/fixtures/default_with_excluded_data_toml/my_package/data/sub_data/data3.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/default_with_excluded_data_toml/pyproject.toml b/tests/masonry/builders/fixtures/default_with_excluded_data_toml/pyproject.toml new file mode 100644 index 0000000..fcd31fa --- /dev/null +++ b/tests/masonry/builders/fixtures/default_with_excluded_data_toml/pyproject.toml @@ -0,0 +1,41 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." 
+authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +exclude = ["my_package/data/data1.txt"] + +homepage = "https://python-poetry.org/" +repository = "https://github.com/python-poetry/poetry" +documentation = "https://python-poetry.org/docs" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +# Requirements +[tool.poetry.dependencies] +python = "^3.6" +cleo = "^0.6" +cachy = { version = "^0.2.0", extras = ["msgpack"] } + +pendulum = { version = "^1.4", optional = true } + +[tool.poetry.dev-dependencies] +pytest = "~3.4" + +[tool.poetry.extras] +time = ["pendulum"] + +[tool.poetry.scripts] +my-script = "my_package:main" +my-2nd-script = "my_package:main2" diff --git a/tests/masonry/builders/fixtures/disable_setup_py/README.rst b/tests/masonry/builders/fixtures/disable_setup_py/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/masonry/builders/fixtures/disable_setup_py/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/masonry/builders/fixtures/disable_setup_py/my_package/__init__.py b/tests/masonry/builders/fixtures/disable_setup_py/my_package/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/disable_setup_py/pyproject.toml b/tests/masonry/builders/fixtures/disable_setup_py/pyproject.toml new file mode 100644 index 0000000..372501f --- /dev/null +++ b/tests/masonry/builders/fixtures/disable_setup_py/pyproject.toml @@ -0,0 +1,35 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." +authors = [ + "Poetry Team " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org" +repository = "https://github.com/python-poetry/poetry" +documentation = "https://python-poetry.org/docs" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +[tool.poetry.build] +generate-setup-file = false + +# Requirements +[tool.poetry.dependencies] +python = "~2.7 || ^3.6" + +[tool.poetry.extras] + +[tool.poetry.dev-dependencies] + +[tool.poetry.scripts] +my-script = "my_package:main" diff --git a/tests/masonry/builders/fixtures/epoch/README.rst b/tests/masonry/builders/fixtures/epoch/README.rst new file mode 100644 index 0000000..ce1f88e --- /dev/null +++ b/tests/masonry/builders/fixtures/epoch/README.rst @@ -0,0 +1,2 @@ +Epoch +===== diff --git a/tests/masonry/builders/fixtures/epoch/epoch.py b/tests/masonry/builders/fixtures/epoch/epoch.py new file mode 100644 index 0000000..8de3a59 --- /dev/null +++ b/tests/masonry/builders/fixtures/epoch/epoch.py @@ -0,0 +1,3 @@ +"""Example module""" + +__version__ = "1!2.0" diff --git a/tests/masonry/builders/fixtures/epoch/pyproject.toml b/tests/masonry/builders/fixtures/epoch/pyproject.toml new file mode 100644 index 0000000..87c34f3 --- /dev/null +++ b/tests/masonry/builders/fixtures/epoch/pyproject.toml @@ -0,0 +1,12 @@ +[tool.poetry] +name = "epoch" +version = "1!2.0" +description = "Some description." 
+authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" diff --git a/tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdist/__init__.py b/tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdist/__init__.py new file mode 100644 index 0000000..3dc1f76 --- /dev/null +++ b/tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdist/__init__.py @@ -0,0 +1 @@ +__version__ = "0.1.0" diff --git a/tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdist/compiled/source.c b/tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdist/compiled/source.c new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdist/compiled/source.h b/tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdist/compiled/source.h new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdist/cython_code.pyx b/tests/masonry/builders/fixtures/exclude-whl-include-sdist/exclude_whl_include_sdist/cython_code.pyx new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/exclude-whl-include-sdist/pyproject.toml b/tests/masonry/builders/fixtures/exclude-whl-include-sdist/pyproject.toml new file mode 100644 index 0000000..a684e61 --- /dev/null +++ b/tests/masonry/builders/fixtures/exclude-whl-include-sdist/pyproject.toml @@ -0,0 +1,17 @@ +[tool.poetry] +name = "exclude-whl-include-sdist" +description = "" +authors = [] +version = "0.1.0" +exclude = ["exclude_whl_include_sdist/compiled", "exclude_whl_include_sdist/*.pyx"] +include = [ + { path = "exclude_whl_include_sdist/compiled/**/*", format = "sdist" }, + { path = "exclude_whl_include_sdist/*.pyx", format = "sdist" } +] + +[tool.poetry.dependencies] +python = "^3.9" + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" diff --git a/tests/masonry/builders/fixtures/exclude_nested_data_toml/LICENSE b/tests/masonry/builders/fixtures/exclude_nested_data_toml/LICENSE new file mode 100644 index 0000000..44cf2b3 --- /dev/null +++ b/tests/masonry/builders/fixtures/exclude_nested_data_toml/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2018 Sébastien Eustace + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
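The exclude-whl-include-sdist fixture above declares poetry.core.masonry.api as its PEP 517 build backend, with includes that apply to the sdist only. The integration tests earlier in this patch exercise the backend through the build and pip front ends; as a sketch, the standard PEP 517 hooks can also be called in-process, provided they are run from the project directory (the hooks operate on the current working directory). The example file names in the comments are assumptions based on the fixture's name and version:

from __future__ import annotations

import os
import tempfile

# PEP 517 entry points exposed by the backend declared in [build-system].
from poetry.core.masonry.api import build_sdist, build_wheel

# The hooks build whatever project lives in the current working directory.
os.chdir("tests/masonry/builders/fixtures/exclude-whl-include-sdist")

with tempfile.TemporaryDirectory() as outdir:
    sdist_name = build_sdist(outdir)  # e.g. "exclude_whl_include_sdist-0.1.0.tar.gz"
    wheel_name = build_wheel(outdir)  # e.g. "exclude_whl_include_sdist-0.1.0-py3-none-any.whl"
    print(sdist_name, wheel_name)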
diff --git a/tests/masonry/builders/fixtures/exclude_nested_data_toml/README.rst b/tests/masonry/builders/fixtures/exclude_nested_data_toml/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/masonry/builders/fixtures/exclude_nested_data_toml/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/__init__.py b/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/data1.txt b/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/data1.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/data2.txt b/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/data2.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/sub_data/data2.txt b/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/sub_data/data2.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/sub_data/data3.txt b/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/data/sub_data/data3.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/item1/itemdata1.txt b/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/item1/itemdata1.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/item1/subitem/subitemdata.txt b/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/item1/subitem/subitemdata.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/item2/itemdata2.txt b/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/item2/itemdata2.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/publicdata.txt b/tests/masonry/builders/fixtures/exclude_nested_data_toml/my_package/puplic/publicdata.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/exclude_nested_data_toml/pyproject.toml b/tests/masonry/builders/fixtures/exclude_nested_data_toml/pyproject.toml new file mode 100644 index 0000000..28cff3c --- /dev/null +++ b/tests/masonry/builders/fixtures/exclude_nested_data_toml/pyproject.toml @@ -0,0 +1,42 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." 
+authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +exclude = ["**/data/", "**/*/item*"] +include = ["my_package/data/data2.txt"] + +homepage = "https://python-poetry.org/" +repository = "https://github.com/python-poetry/poetry" +documentation = "https://python-poetry.org/docs" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +# Requirements +[tool.poetry.dependencies] +python = "^3.6" +cleo = "^0.6" +cachy = { version = "^0.2.0", extras = ["msgpack"] } + +pendulum = { version = "^1.4", optional = true } + +[tool.poetry.dev-dependencies] +pytest = "~3.4" + +[tool.poetry.extras] +time = ["pendulum"] + +[tool.poetry.scripts] +my-script = "my_package:main" +my-2nd-script = "my_package:main2" diff --git a/tests/masonry/builders/fixtures/excluded_subpackage/README.rst b/tests/masonry/builders/fixtures/excluded_subpackage/README.rst new file mode 100644 index 0000000..b006402 --- /dev/null +++ b/tests/masonry/builders/fixtures/excluded_subpackage/README.rst @@ -0,0 +1,2 @@ +My Package +========== \ No newline at end of file diff --git a/tests/masonry/builders/fixtures/excluded_subpackage/example/__init__.py b/tests/masonry/builders/fixtures/excluded_subpackage/example/__init__.py new file mode 100644 index 0000000..3dc1f76 --- /dev/null +++ b/tests/masonry/builders/fixtures/excluded_subpackage/example/__init__.py @@ -0,0 +1 @@ +__version__ = "0.1.0" diff --git a/tests/masonry/builders/fixtures/excluded_subpackage/example/test/__init__.py b/tests/masonry/builders/fixtures/excluded_subpackage/example/test/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/excluded_subpackage/example/test/excluded.py b/tests/masonry/builders/fixtures/excluded_subpackage/example/test/excluded.py new file mode 100644 index 0000000..273b5df --- /dev/null +++ b/tests/masonry/builders/fixtures/excluded_subpackage/example/test/excluded.py @@ -0,0 +1,5 @@ +from tests.masonry.builders.fixtures.excluded_subpackage.example import __version__ + + +def test_version() -> None: + assert __version__ == "0.1.0" diff --git a/tests/masonry/builders/fixtures/excluded_subpackage/pyproject.toml b/tests/masonry/builders/fixtures/excluded_subpackage/pyproject.toml new file mode 100644 index 0000000..3852bb8 --- /dev/null +++ b/tests/masonry/builders/fixtures/excluded_subpackage/pyproject.toml @@ -0,0 +1,18 @@ +[tool.poetry] +name = "example" +version = "0.1.0" +description = "" +authors = ["Sébastien Eustace "] +exclude = [ + "**/test/**/*", +] + +[tool.poetry.dependencies] +python = "^3.6" + +[tool.poetry.dev-dependencies] +pytest = "^3.0" + +[build-system] +requires = ["poetry>=0.12"] +build-backend = "poetry.masonry.api" diff --git a/tests/masonry/builders/fixtures/extended/README.rst b/tests/masonry/builders/fixtures/extended/README.rst new file mode 100644 index 0000000..a7508bd --- /dev/null +++ b/tests/masonry/builders/fixtures/extended/README.rst @@ -0,0 +1,2 @@ +Module 1 +======== diff --git a/tests/masonry/builders/fixtures/extended/build.py b/tests/masonry/builders/fixtures/extended/build.py new file mode 100644 index 0000000..0b49f7e --- /dev/null +++ b/tests/masonry/builders/fixtures/extended/build.py @@ -0,0 +1,8 @@ +from setuptools import Extension + + +extensions = [Extension("extended.extended", ["extended/extended.c"])] + + +def build(setup_kwargs): + setup_kwargs.update({"ext_modules": extensions}) 
diff --git a/tests/masonry/builders/fixtures/extended/extended/__init__.py b/tests/masonry/builders/fixtures/extended/extended/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/extended/extended/extended.c b/tests/masonry/builders/fixtures/extended/extended/extended.c new file mode 100644 index 0000000..25a028e --- /dev/null +++ b/tests/masonry/builders/fixtures/extended/extended/extended.c @@ -0,0 +1,58 @@ +#include + + +static PyObject *hello(PyObject *self) { + return PyUnicode_FromString("Hello"); +} + + +static PyMethodDef module_methods[] = { + { + "hello", + (PyCFunction) hello, + NULL, + PyDoc_STR("Say hello.") + }, + {NULL} +}; + +#if PY_MAJOR_VERSION >= 3 +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "extended", + NULL, + -1, + module_methods, + NULL, + NULL, + NULL, + NULL, +}; +#endif + +PyMODINIT_FUNC +#if PY_MAJOR_VERSION >= 3 +PyInit_extended(void) +#else +init_extended(void) +#endif +{ + PyObject *module; + +#if PY_MAJOR_VERSION >= 3 + module = PyModule_Create(&moduledef); +#else + module = Py_InitModule3("extended", module_methods, NULL); +#endif + + if (module == NULL) +#if PY_MAJOR_VERSION >= 3 + return NULL; +#else + return; +#endif + +#if PY_MAJOR_VERSION >= 3 + return module; +#endif +} diff --git a/tests/masonry/builders/fixtures/extended/pyproject.toml b/tests/masonry/builders/fixtures/extended/pyproject.toml new file mode 100644 index 0000000..6266092 --- /dev/null +++ b/tests/masonry/builders/fixtures/extended/pyproject.toml @@ -0,0 +1,15 @@ +[tool.poetry] +name = "extended" +version = "0.1" +description = "Some description." +authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" + +[tool.poetry.build] +script = "build.py" diff --git a/tests/masonry/builders/fixtures/extended/setup.py b/tests/masonry/builders/fixtures/extended/setup.py new file mode 100644 index 0000000..1c07efe --- /dev/null +++ b/tests/masonry/builders/fixtures/extended/setup.py @@ -0,0 +1,24 @@ +from setuptools import setup + +packages = ["extended"] + +package_data = {"": ["*"]} + +setup_kwargs = { + "name": "extended", + "version": "0.1", + "description": "Some description.", + "long_description": "Module 1\n========\n", + "author": "Sébastien Eustace", + "author_email": "sebastien@eustace.io", + "maintainer": "None", + "maintainer_email": "None", + "url": "https://python-poetry.org/", + "packages": packages, + "package_data": package_data, +} +from build import * + +build(setup_kwargs) + +setup(**setup_kwargs) diff --git a/tests/masonry/builders/fixtures/extended_with_no_setup/README.rst b/tests/masonry/builders/fixtures/extended_with_no_setup/README.rst new file mode 100644 index 0000000..a7508bd --- /dev/null +++ b/tests/masonry/builders/fixtures/extended_with_no_setup/README.rst @@ -0,0 +1,2 @@ +Module 1 +======== diff --git a/tests/masonry/builders/fixtures/extended_with_no_setup/build.py b/tests/masonry/builders/fixtures/extended_with_no_setup/build.py new file mode 100644 index 0000000..01cf728 --- /dev/null +++ b/tests/masonry/builders/fixtures/extended_with_no_setup/build.py @@ -0,0 +1,28 @@ +import os +import shutil + +from setuptools.command.build_ext import build_ext +from setuptools import Distribution, Extension + + +extensions = [Extension("extended.extended", ["extended/extended.c"])] + + +def build() -> None: + distribution = Distribution({"name": "extended", "ext_modules": extensions}) + + cmd = build_ext(distribution) + 
cmd.finalize_options() + cmd.run() + + # Copy built extensions back to the project + for output in cmd.get_outputs(): + relative_extension = os.path.relpath(output, cmd.build_lib) + shutil.copyfile(output, relative_extension) + mode = os.stat(relative_extension).st_mode + mode |= (mode & 0o444) >> 2 + os.chmod(relative_extension, mode) + + +if __name__ == "__main__": + build() diff --git a/tests/masonry/builders/fixtures/extended_with_no_setup/extended/__init__.py b/tests/masonry/builders/fixtures/extended_with_no_setup/extended/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/extended_with_no_setup/extended/extended.c b/tests/masonry/builders/fixtures/extended_with_no_setup/extended/extended.c new file mode 100644 index 0000000..25a028e --- /dev/null +++ b/tests/masonry/builders/fixtures/extended_with_no_setup/extended/extended.c @@ -0,0 +1,58 @@ +#include + + +static PyObject *hello(PyObject *self) { + return PyUnicode_FromString("Hello"); +} + + +static PyMethodDef module_methods[] = { + { + "hello", + (PyCFunction) hello, + NULL, + PyDoc_STR("Say hello.") + }, + {NULL} +}; + +#if PY_MAJOR_VERSION >= 3 +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "extended", + NULL, + -1, + module_methods, + NULL, + NULL, + NULL, + NULL, +}; +#endif + +PyMODINIT_FUNC +#if PY_MAJOR_VERSION >= 3 +PyInit_extended(void) +#else +init_extended(void) +#endif +{ + PyObject *module; + +#if PY_MAJOR_VERSION >= 3 + module = PyModule_Create(&moduledef); +#else + module = Py_InitModule3("extended", module_methods, NULL); +#endif + + if (module == NULL) +#if PY_MAJOR_VERSION >= 3 + return NULL; +#else + return; +#endif + +#if PY_MAJOR_VERSION >= 3 + return module; +#endif +} diff --git a/tests/masonry/builders/fixtures/extended_with_no_setup/pyproject.toml b/tests/masonry/builders/fixtures/extended_with_no_setup/pyproject.toml new file mode 100644 index 0000000..c5d5467 --- /dev/null +++ b/tests/masonry/builders/fixtures/extended_with_no_setup/pyproject.toml @@ -0,0 +1,16 @@ +[tool.poetry] +name = "extended" +version = "0.1" +description = "Some description." 
+authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" + +[tool.poetry.build] +script = "build.py" +generate-setup-file = false diff --git a/tests/masonry/builders/fixtures/include_excluded_code/lib/my_package/__init__.py b/tests/masonry/builders/fixtures/include_excluded_code/lib/my_package/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/include_excluded_code/lib/my_package/generated.py b/tests/masonry/builders/fixtures/include_excluded_code/lib/my_package/generated.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/include_excluded_code/pyproject.toml b/tests/masonry/builders/fixtures/include_excluded_code/pyproject.toml new file mode 100644 index 0000000..379b7db --- /dev/null +++ b/tests/masonry/builders/fixtures/include_excluded_code/pyproject.toml @@ -0,0 +1,20 @@ +[tool.poetry] +name = "my_package" +version = "0.1.0" +description = "" +authors = ["Audun Skaugen "] + +packages = [{include='my_package', from='lib'}] +# Simulate excluding due to .gitignore +exclude = ['lib/my_package/generated.py'] +# Include again +include = ['lib/my_package/generated.py'] + +[tool.poetry.dependencies] +python = "^3.8" + +[tool.poetry.dev-dependencies] + +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" diff --git a/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/LICENSE b/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/LICENSE new file mode 100644 index 0000000..44cf2b3 --- /dev/null +++ b/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2018 Sébastien Eustace + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/README.rst b/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Bar/foo/bar/Foo.py b/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Bar/foo/bar/Foo.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Foo/Bar.py b/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Foo/Bar.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Foo/IncludedBar.py b/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Foo/IncludedBar.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Foo/SecondBar.py b/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Foo/SecondBar.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Foo/lowercasebar.py b/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/Foo/lowercasebar.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/FooBar/Bar.py b/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/FooBar/Bar.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/FooBar/lowercasebar.py b/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/FooBar/lowercasebar.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/__init__.py b/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/__init__.py new file mode 100644 index 0000000..10aa336 --- /dev/null +++ b/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/my_package/__init__.py @@ -0,0 +1 @@ +__version__ = "1.2.3" diff --git a/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/pyproject.toml b/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/pyproject.toml new file mode 100644 index 0000000..44e226c --- /dev/null +++ b/tests/masonry/builders/fixtures/invalid_case_sensitive_exclusions/pyproject.toml @@ -0,0 +1,44 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." 
+authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" +repository = "https://github.com/python-poetry/poetry" +documentation = "https://python-poetry.org/docs" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +exclude = [ + "my_package/Bar/*/bar/*.py" +] + +# Requirements +[tool.poetry.dependencies] +python = "^3.6" +cleo = "^0.6" +cachy = { version = "^0.2.0", extras = ["msgpack"] } + +pendulum = { version = "^1.4", optional = true } + +[tool.poetry.dev-dependencies] +pytest = "~3.4" + +[tool.poetry.extras] +time = ["pendulum"] + +[tool.poetry.scripts] +my-script = "my_package:main" +my-2nd-script = "my_package:main2" +extra-script = {reference = "my_package.extra:main", extras = ["time"], type = "console"} diff --git a/tests/masonry/builders/fixtures/licenses_and_copying/COPYING b/tests/masonry/builders/fixtures/licenses_and_copying/COPYING new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/licenses_and_copying/COPYING.txt b/tests/masonry/builders/fixtures/licenses_and_copying/COPYING.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/licenses_and_copying/LICENSE b/tests/masonry/builders/fixtures/licenses_and_copying/LICENSE new file mode 100644 index 0000000..44cf2b3 --- /dev/null +++ b/tests/masonry/builders/fixtures/licenses_and_copying/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2018 Sébastien Eustace + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
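The licenses_and_copying fixture introduced here (its remaining LICENSE.md, LICENSES/ and pyproject.toml files follow below) appears to exist to check that LICENSE and COPYING files end up inside the wheel's .dist-info directory. A small, stdlib-only sketch for inspecting that, assuming a wheel for the fixture has already been built (for example with WheelBuilder.make as sketched earlier) at the path shown:

from __future__ import annotations

import zipfile

from pathlib import Path

# Assumed path of a previously built wheel for the fixture.
wheel_path = Path(
    "tests/masonry/builders/fixtures/licenses_and_copying/dist/my_package-1.2.3-py3-none-any.whl"
)

with zipfile.ZipFile(wheel_path) as whl:
    dist_info = [name for name in whl.namelist() if ".dist-info/" in name]
    # Expect METADATA, WHEEL and RECORD alongside the LICENSE*, COPYING* and LICENSES/* files.
    for name in sorted(dist_info):
        print(name)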
diff --git a/tests/masonry/builders/fixtures/licenses_and_copying/LICENSE.md b/tests/masonry/builders/fixtures/licenses_and_copying/LICENSE.md new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/licenses_and_copying/LICENSES/BSD-3.md b/tests/masonry/builders/fixtures/licenses_and_copying/LICENSES/BSD-3.md new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/licenses_and_copying/LICENSES/CUSTOM-LICENSE b/tests/masonry/builders/fixtures/licenses_and_copying/LICENSES/CUSTOM-LICENSE new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/licenses_and_copying/LICENSES/MIT.txt b/tests/masonry/builders/fixtures/licenses_and_copying/LICENSES/MIT.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/licenses_and_copying/README.rst b/tests/masonry/builders/fixtures/licenses_and_copying/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/masonry/builders/fixtures/licenses_and_copying/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/masonry/builders/fixtures/licenses_and_copying/my_package/__init__.py b/tests/masonry/builders/fixtures/licenses_and_copying/my_package/__init__.py new file mode 100644 index 0000000..10aa336 --- /dev/null +++ b/tests/masonry/builders/fixtures/licenses_and_copying/my_package/__init__.py @@ -0,0 +1 @@ +__version__ = "1.2.3" diff --git a/tests/masonry/builders/fixtures/licenses_and_copying/pyproject.toml b/tests/masonry/builders/fixtures/licenses_and_copying/pyproject.toml new file mode 100644 index 0000000..70880bc --- /dev/null +++ b/tests/masonry/builders/fixtures/licenses_and_copying/pyproject.toml @@ -0,0 +1,49 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." 
+authors = [ + "Sébastien Eustace " +] +maintainers = [ + "People Everywhere " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" +repository = "https://github.com/python-poetry/poetry" +documentation = "https://python-poetry.org/docs" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +# Requirements +[tool.poetry.dependencies] +python = "^3.6" +cleo = "^0.6" +cachy = { version = "^0.2.0", extras = ["msgpack"] } + +[tool.poetry.dependencies.pendulum] +version = "^1.4" +markers= 'python_version ~= "2.7" and sys_platform == "win32" or python_version in "3.4 3.5"' +optional = true + +[tool.poetry.dev-dependencies] +pytest = "~3.4" + +[tool.poetry.extras] +time = ["pendulum"] + +[tool.poetry.scripts] +my-script = "my_package:main" +my-2nd-script = "my_package:main2" +extra-script = {reference = "my_package.extra:main", extras = ["time"], type = "console"} + +[tool.poetry.urls] +"Issue Tracker" = "https://github.com/python-poetry/poetry/issues" diff --git a/tests/masonry/builders/fixtures/localversionlabel/localversionlabel.py b/tests/masonry/builders/fixtures/localversionlabel/localversionlabel.py new file mode 100644 index 0000000..0f503ec --- /dev/null +++ b/tests/masonry/builders/fixtures/localversionlabel/localversionlabel.py @@ -0,0 +1 @@ +"""Test fixture for https://github.com/python-poetry/poetry/issues/756""" diff --git a/tests/masonry/builders/fixtures/localversionlabel/pyproject.toml b/tests/masonry/builders/fixtures/localversionlabel/pyproject.toml new file mode 100644 index 0000000..4b5f4dd --- /dev/null +++ b/tests/masonry/builders/fixtures/localversionlabel/pyproject.toml @@ -0,0 +1,5 @@ +[tool.poetry] +name = "localversionlabel" +description = "Local Version Label" +version = "0.1-beta.1+gitbranch-buildno-1" +authors = [] diff --git a/tests/masonry/builders/fixtures/module1/README.rst b/tests/masonry/builders/fixtures/module1/README.rst new file mode 100644 index 0000000..a7508bd --- /dev/null +++ b/tests/masonry/builders/fixtures/module1/README.rst @@ -0,0 +1,2 @@ +Module 1 +======== diff --git a/tests/masonry/builders/fixtures/module1/module1.py b/tests/masonry/builders/fixtures/module1/module1.py new file mode 100644 index 0000000..7ef41c5 --- /dev/null +++ b/tests/masonry/builders/fixtures/module1/module1.py @@ -0,0 +1,3 @@ +"""Example module""" + +__version__ = "0.1" diff --git a/tests/masonry/builders/fixtures/module1/pyproject.toml b/tests/masonry/builders/fixtures/module1/pyproject.toml new file mode 100644 index 0000000..fd5af3a --- /dev/null +++ b/tests/masonry/builders/fixtures/module1/pyproject.toml @@ -0,0 +1,16 @@ +[tool.poetry] +name = "module1" +version = "0.1" +description = "Some description." 
+authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" + + +[tool.poetry.dependencies] +python = "*" diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only/pkg-stubs/__init__.pyi b/tests/masonry/builders/fixtures/pep_561_stub_only/pkg-stubs/__init__.pyi new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only/pkg-stubs/module.pyi b/tests/masonry/builders/fixtures/pep_561_stub_only/pkg-stubs/module.pyi new file mode 100644 index 0000000..d79e6e3 --- /dev/null +++ b/tests/masonry/builders/fixtures/pep_561_stub_only/pkg-stubs/module.pyi @@ -0,0 +1,4 @@ +"""Example module""" +from typing import Tuple + +version_info = Tuple[int, int, int] diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only/pkg-stubs/subpkg/__init__.pyi b/tests/masonry/builders/fixtures/pep_561_stub_only/pkg-stubs/subpkg/__init__.pyi new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only/pyproject.toml b/tests/masonry/builders/fixtures/pep_561_stub_only/pyproject.toml new file mode 100644 index 0000000..36a077a --- /dev/null +++ b/tests/masonry/builders/fixtures/pep_561_stub_only/pyproject.toml @@ -0,0 +1,14 @@ +[tool.poetry] +name = "pep-561-stubs" +version = "0.1" +description = "PEP 561 stub package example" +authors = [ + "Oleg Höfling " +] +license = "MIT" +packages = [ + {include = "pkg-stubs"} +] + +[tool.poetry.dependencies] +python = "^3.6" diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/__init__.pyi b/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/__init__.pyi new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/module.pyi b/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/module.pyi new file mode 100644 index 0000000..d79e6e3 --- /dev/null +++ b/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/module.pyi @@ -0,0 +1,4 @@ +"""Example module""" +from typing import Tuple + +version_info = Tuple[int, int, int] diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/py.typed b/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/py.typed new file mode 100644 index 0000000..b648ac9 --- /dev/null +++ b/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/py.typed @@ -0,0 +1 @@ +partial diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/subpkg/__init__.pyi b/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pkg-stubs/subpkg/__init__.pyi new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pyproject.toml b/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pyproject.toml new file mode 100644 index 0000000..db202c0 --- /dev/null +++ b/tests/masonry/builders/fixtures/pep_561_stub_only_partial/pyproject.toml @@ -0,0 +1,14 @@ +[tool.poetry] +name = "pep-561-stubs" +version = "0.1" +description = "PEP 561 stub package example with the py.typed marker file" +authors = [ + "Oleg Höfling " +] +license = "MIT" +packages = [ + {include = "pkg-stubs"} +] + +[tool.poetry.dependencies] +python = "^3.6" diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pkg-stubs/module.pyi b/tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pkg-stubs/module.pyi new file mode 100644 index 
0000000..d79e6e3 --- /dev/null +++ b/tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pkg-stubs/module.pyi @@ -0,0 +1,4 @@ +"""Example module""" +from typing import Tuple + +version_info = Tuple[int, int, int] diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pkg-stubs/subpkg/__init__.pyi b/tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pkg-stubs/subpkg/__init__.pyi new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pkg-stubs/subpkg/py.typed b/tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pkg-stubs/subpkg/py.typed new file mode 100644 index 0000000..b648ac9 --- /dev/null +++ b/tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pkg-stubs/subpkg/py.typed @@ -0,0 +1 @@ +partial diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pyproject.toml b/tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pyproject.toml new file mode 100644 index 0000000..265effd --- /dev/null +++ b/tests/masonry/builders/fixtures/pep_561_stub_only_partial_namespace/pyproject.toml @@ -0,0 +1,14 @@ +[tool.poetry] +name = "pep-561-stubs" +version = "0.1" +description = "PEP 561 stub namespace package example with the py.typed marker file" +authors = [ + "Henrik Bruåsdal " +] +license = "MIT" +packages = [ + {include = "pkg-stubs"} +] + +[tool.poetry.dependencies] +python = "^3.6" diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only_src/pyproject.toml b/tests/masonry/builders/fixtures/pep_561_stub_only_src/pyproject.toml new file mode 100644 index 0000000..666b2b2 --- /dev/null +++ b/tests/masonry/builders/fixtures/pep_561_stub_only_src/pyproject.toml @@ -0,0 +1,14 @@ +[tool.poetry] +name = "pep-561-stubs" +version = "0.1" +description = "PEP 561 stub package example with an src layout" +authors = [ + "Oleg Höfling " +] +license = "MIT" +packages = [ + {include = "pkg-stubs", from = "src"} +] + +[tool.poetry.dependencies] +python = "^3.6" diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only_src/src/pkg-stubs/__init__.pyi b/tests/masonry/builders/fixtures/pep_561_stub_only_src/src/pkg-stubs/__init__.pyi new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only_src/src/pkg-stubs/module.pyi b/tests/masonry/builders/fixtures/pep_561_stub_only_src/src/pkg-stubs/module.pyi new file mode 100644 index 0000000..d79e6e3 --- /dev/null +++ b/tests/masonry/builders/fixtures/pep_561_stub_only_src/src/pkg-stubs/module.pyi @@ -0,0 +1,4 @@ +"""Example module""" +from typing import Tuple + +version_info = Tuple[int, int, int] diff --git a/tests/masonry/builders/fixtures/pep_561_stub_only_src/src/pkg-stubs/subpkg/__init__.pyi b/tests/masonry/builders/fixtures/pep_561_stub_only_src/src/pkg-stubs/subpkg/__init__.pyi new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/prerelease/README.rst b/tests/masonry/builders/fixtures/prerelease/README.rst new file mode 100644 index 0000000..77d5439 --- /dev/null +++ b/tests/masonry/builders/fixtures/prerelease/README.rst @@ -0,0 +1,2 @@ +Prerelease +========== diff --git a/tests/masonry/builders/fixtures/prerelease/prerelease.py b/tests/masonry/builders/fixtures/prerelease/prerelease.py new file mode 100644 index 0000000..7ef41c5 --- /dev/null +++ b/tests/masonry/builders/fixtures/prerelease/prerelease.py @@ -0,0 +1,3 @@ +"""Example module""" + +__version__ = "0.1" diff 
--git a/tests/masonry/builders/fixtures/prerelease/pyproject.toml b/tests/masonry/builders/fixtures/prerelease/pyproject.toml new file mode 100644 index 0000000..9e920c5 --- /dev/null +++ b/tests/masonry/builders/fixtures/prerelease/pyproject.toml @@ -0,0 +1,12 @@ +[tool.poetry] +name = "prerelease" +version = "0.1-beta.1" +description = "Some description." +authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" diff --git a/tests/masonry/builders/fixtures/script_callable_legacy_string/README.rst b/tests/masonry/builders/fixtures/script_callable_legacy_string/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/masonry/builders/fixtures/script_callable_legacy_string/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/masonry/builders/fixtures/script_callable_legacy_string/my_package/__init__.py b/tests/masonry/builders/fixtures/script_callable_legacy_string/my_package/__init__.py new file mode 100644 index 0000000..10aa336 --- /dev/null +++ b/tests/masonry/builders/fixtures/script_callable_legacy_string/my_package/__init__.py @@ -0,0 +1 @@ +__version__ = "1.2.3" diff --git a/tests/masonry/builders/fixtures/script_callable_legacy_string/pyproject.toml b/tests/masonry/builders/fixtures/script_callable_legacy_string/pyproject.toml new file mode 100644 index 0000000..2c949a9 --- /dev/null +++ b/tests/masonry/builders/fixtures/script_callable_legacy_string/pyproject.toml @@ -0,0 +1,19 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." +authors = [ + "Poetry Maintainers " +] +license = "MIT" +readme = "README.rst" + +[tool.poetry.dependencies] +python = "^3.6" + +[tool.poetry.dev-dependencies] + +[tool.poetry.extras] + +[tool.poetry.scripts] +script-legacy = "my_package:main" diff --git a/tests/masonry/builders/fixtures/script_callable_legacy_table/README.rst b/tests/masonry/builders/fixtures/script_callable_legacy_table/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/masonry/builders/fixtures/script_callable_legacy_table/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/masonry/builders/fixtures/script_callable_legacy_table/my_package/__init__.py b/tests/masonry/builders/fixtures/script_callable_legacy_table/my_package/__init__.py new file mode 100644 index 0000000..10aa336 --- /dev/null +++ b/tests/masonry/builders/fixtures/script_callable_legacy_table/my_package/__init__.py @@ -0,0 +1 @@ +__version__ = "1.2.3" diff --git a/tests/masonry/builders/fixtures/script_callable_legacy_table/pyproject.toml b/tests/masonry/builders/fixtures/script_callable_legacy_table/pyproject.toml new file mode 100644 index 0000000..cab2488 --- /dev/null +++ b/tests/masonry/builders/fixtures/script_callable_legacy_table/pyproject.toml @@ -0,0 +1,21 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." 
+authors = [ + "Poetry Maintainers " +] +license = "MIT" +readme = "README.rst" + +[tool.poetry.dependencies] +python = "^3.6" + +[tool.poetry.dev-dependencies] + +[tool.poetry.extras] +time = [] + +[tool.poetry.scripts] +script-legacy = { callable = "my_package.extra_legacy:main" } +extra-script-legacy = { callable = "my_package.extra_legacy:main", extras = ["time"] } diff --git a/tests/masonry/builders/fixtures/script_reference_console/README.rst b/tests/masonry/builders/fixtures/script_reference_console/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/masonry/builders/fixtures/script_reference_console/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/masonry/builders/fixtures/script_reference_console/my_package/__init__.py b/tests/masonry/builders/fixtures/script_reference_console/my_package/__init__.py new file mode 100644 index 0000000..10aa336 --- /dev/null +++ b/tests/masonry/builders/fixtures/script_reference_console/my_package/__init__.py @@ -0,0 +1 @@ +__version__ = "1.2.3" diff --git a/tests/masonry/builders/fixtures/script_reference_console/pyproject.toml b/tests/masonry/builders/fixtures/script_reference_console/pyproject.toml new file mode 100644 index 0000000..a55800b --- /dev/null +++ b/tests/masonry/builders/fixtures/script_reference_console/pyproject.toml @@ -0,0 +1,21 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." +authors = [ + "Poetry Maintainers " +] +license = "MIT" +readme = "README.rst" + +[tool.poetry.dependencies] +python = "^3.6" + +[tool.poetry.dev-dependencies] + +[tool.poetry.extras] +time = [] + +[tool.poetry.scripts] +script = { reference = "my_package.extra:main", type = "console" } +extra-script = { reference = "my_package.extra:main", extras = ["time"], type = "console" } diff --git a/tests/masonry/builders/fixtures/script_reference_file/README.rst b/tests/masonry/builders/fixtures/script_reference_file/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/masonry/builders/fixtures/script_reference_file/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/masonry/builders/fixtures/script_reference_file/bin/script.sh b/tests/masonry/builders/fixtures/script_reference_file/bin/script.sh new file mode 100644 index 0000000..2a9686a --- /dev/null +++ b/tests/masonry/builders/fixtures/script_reference_file/bin/script.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +echo "Hello World!" \ No newline at end of file diff --git a/tests/masonry/builders/fixtures/script_reference_file/my_package/__init__.py b/tests/masonry/builders/fixtures/script_reference_file/my_package/__init__.py new file mode 100644 index 0000000..10aa336 --- /dev/null +++ b/tests/masonry/builders/fixtures/script_reference_file/my_package/__init__.py @@ -0,0 +1 @@ +__version__ = "1.2.3" diff --git a/tests/masonry/builders/fixtures/script_reference_file/pyproject.toml b/tests/masonry/builders/fixtures/script_reference_file/pyproject.toml new file mode 100644 index 0000000..973a94c --- /dev/null +++ b/tests/masonry/builders/fixtures/script_reference_file/pyproject.toml @@ -0,0 +1,19 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." 
+authors = [ + "Poetry Maintainers " +] +license = "MIT" +readme = "README.rst" + +[tool.poetry.dependencies] +python = "^3.6" + +[tool.poetry.dev-dependencies] + +[tool.poetry.extras] + +[tool.poetry.scripts] +sh-script = { reference = "bin/script.sh", type = "file" } diff --git a/tests/masonry/builders/fixtures/script_reference_file_invalid_definition/README.rst b/tests/masonry/builders/fixtures/script_reference_file_invalid_definition/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/masonry/builders/fixtures/script_reference_file_invalid_definition/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/masonry/builders/fixtures/script_reference_file_invalid_definition/bin/script.sh b/tests/masonry/builders/fixtures/script_reference_file_invalid_definition/bin/script.sh new file mode 100644 index 0000000..2a9686a --- /dev/null +++ b/tests/masonry/builders/fixtures/script_reference_file_invalid_definition/bin/script.sh @@ -0,0 +1,3 @@ +#!/usr/bin/env bash + +echo "Hello World!" \ No newline at end of file diff --git a/tests/masonry/builders/fixtures/script_reference_file_invalid_definition/my_package/__init__.py b/tests/masonry/builders/fixtures/script_reference_file_invalid_definition/my_package/__init__.py new file mode 100644 index 0000000..10aa336 --- /dev/null +++ b/tests/masonry/builders/fixtures/script_reference_file_invalid_definition/my_package/__init__.py @@ -0,0 +1 @@ +__version__ = "1.2.3" diff --git a/tests/masonry/builders/fixtures/script_reference_file_invalid_definition/pyproject.toml b/tests/masonry/builders/fixtures/script_reference_file_invalid_definition/pyproject.toml new file mode 100644 index 0000000..7c6aa56 --- /dev/null +++ b/tests/masonry/builders/fixtures/script_reference_file_invalid_definition/pyproject.toml @@ -0,0 +1,19 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." +authors = [ + "Poetry Maintainers " +] +license = "MIT" +readme = "README.rst" + +[tool.poetry.dependencies] +python = "^3.6" + +[tool.poetry.dev-dependencies] + +[tool.poetry.extras] + +[tool.poetry.scripts] +invalid_definition = { reference = "bin/script.sh", type = "ffiillee" } diff --git a/tests/masonry/builders/fixtures/script_reference_file_missing/README.rst b/tests/masonry/builders/fixtures/script_reference_file_missing/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/masonry/builders/fixtures/script_reference_file_missing/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/masonry/builders/fixtures/script_reference_file_missing/my_package/__init__.py b/tests/masonry/builders/fixtures/script_reference_file_missing/my_package/__init__.py new file mode 100644 index 0000000..10aa336 --- /dev/null +++ b/tests/masonry/builders/fixtures/script_reference_file_missing/my_package/__init__.py @@ -0,0 +1 @@ +__version__ = "1.2.3" diff --git a/tests/masonry/builders/fixtures/script_reference_file_missing/pyproject.toml b/tests/masonry/builders/fixtures/script_reference_file_missing/pyproject.toml new file mode 100644 index 0000000..973a94c --- /dev/null +++ b/tests/masonry/builders/fixtures/script_reference_file_missing/pyproject.toml @@ -0,0 +1,19 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." 
+authors = [ + "Poetry Maintainers " +] +license = "MIT" +readme = "README.rst" + +[tool.poetry.dependencies] +python = "^3.6" + +[tool.poetry.dev-dependencies] + +[tool.poetry.extras] + +[tool.poetry.scripts] +sh-script = { reference = "bin/script.sh", type = "file" } diff --git a/tests/masonry/builders/fixtures/simple_version/README.rst b/tests/masonry/builders/fixtures/simple_version/README.rst new file mode 100644 index 0000000..a7508bd --- /dev/null +++ b/tests/masonry/builders/fixtures/simple_version/README.rst @@ -0,0 +1,2 @@ +Module 1 +======== diff --git a/tests/masonry/builders/fixtures/simple_version/pyproject.toml b/tests/masonry/builders/fixtures/simple_version/pyproject.toml new file mode 100644 index 0000000..4a8767b --- /dev/null +++ b/tests/masonry/builders/fixtures/simple_version/pyproject.toml @@ -0,0 +1,13 @@ +[tool.poetry] +name = "simple-version" +version = "0.1" +description = "Some description." +authors = [ + "Sébastien Eustace " +] + +readme = "README.rst" + + +[tool.poetry.dependencies] +python = "3.6" diff --git a/tests/masonry/builders/fixtures/simple_version/simple_version.py b/tests/masonry/builders/fixtures/simple_version/simple_version.py new file mode 100644 index 0000000..7ef41c5 --- /dev/null +++ b/tests/masonry/builders/fixtures/simple_version/simple_version.py @@ -0,0 +1,3 @@ +"""Example module""" + +__version__ = "0.1" diff --git a/tests/masonry/builders/fixtures/single_python/README.rst b/tests/masonry/builders/fixtures/single_python/README.rst new file mode 100644 index 0000000..265d70d --- /dev/null +++ b/tests/masonry/builders/fixtures/single_python/README.rst @@ -0,0 +1,2 @@ +Single Python +============= diff --git a/tests/masonry/builders/fixtures/single_python/pyproject.toml b/tests/masonry/builders/fixtures/single_python/pyproject.toml new file mode 100644 index 0000000..5aca79e --- /dev/null +++ b/tests/masonry/builders/fixtures/single_python/pyproject.toml @@ -0,0 +1,16 @@ +[tool.poetry] +name = "single-python" +version = "0.1" +description = "Some description." +authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" + + +[tool.poetry.dependencies] +python = "2.7.15" diff --git a/tests/masonry/builders/fixtures/single_python/single_python.py b/tests/masonry/builders/fixtures/single_python/single_python.py new file mode 100644 index 0000000..7ef41c5 --- /dev/null +++ b/tests/masonry/builders/fixtures/single_python/single_python.py @@ -0,0 +1,3 @@ +"""Example module""" + +__version__ = "0.1" diff --git a/tests/masonry/builders/fixtures/source_file/README.rst b/tests/masonry/builders/fixtures/source_file/README.rst new file mode 100644 index 0000000..a7508bd --- /dev/null +++ b/tests/masonry/builders/fixtures/source_file/README.rst @@ -0,0 +1,2 @@ +Module 1 +======== diff --git a/tests/masonry/builders/fixtures/source_file/pyproject.toml b/tests/masonry/builders/fixtures/source_file/pyproject.toml new file mode 100644 index 0000000..34b77e4 --- /dev/null +++ b/tests/masonry/builders/fixtures/source_file/pyproject.toml @@ -0,0 +1,16 @@ +[tool.poetry] +name = "module-src" +version = "0.1" +description = "Some description." 
+authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" + + +[tool.poetry.dependencies] +python = "*" diff --git a/tests/masonry/builders/fixtures/source_file/src/module_src.py b/tests/masonry/builders/fixtures/source_file/src/module_src.py new file mode 100644 index 0000000..7ef41c5 --- /dev/null +++ b/tests/masonry/builders/fixtures/source_file/src/module_src.py @@ -0,0 +1,3 @@ +"""Example module""" + +__version__ = "0.1" diff --git a/tests/masonry/builders/fixtures/source_package/README.rst b/tests/masonry/builders/fixtures/source_package/README.rst new file mode 100644 index 0000000..a7508bd --- /dev/null +++ b/tests/masonry/builders/fixtures/source_package/README.rst @@ -0,0 +1,2 @@ +Module 1 +======== diff --git a/tests/masonry/builders/fixtures/source_package/pyproject.toml b/tests/masonry/builders/fixtures/source_package/pyproject.toml new file mode 100644 index 0000000..4456fdb --- /dev/null +++ b/tests/masonry/builders/fixtures/source_package/pyproject.toml @@ -0,0 +1,15 @@ +[tool.poetry] +name = "package-src" +version = "0.1" +description = "Some description." +authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" + +[tool.poetry.dependencies] +python = "*" diff --git a/tests/masonry/builders/fixtures/source_package/src/package_src/__init__.py b/tests/masonry/builders/fixtures/source_package/src/package_src/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/source_package/src/package_src/module.py b/tests/masonry/builders/fixtures/source_package/src/package_src/module.py new file mode 100644 index 0000000..7ef41c5 --- /dev/null +++ b/tests/masonry/builders/fixtures/source_package/src/package_src/module.py @@ -0,0 +1,3 @@ +"""Example module""" + +__version__ = "0.1" diff --git a/tests/masonry/builders/fixtures/split_source/lib_a/module_a/__init__.py b/tests/masonry/builders/fixtures/split_source/lib_a/module_a/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/split_source/lib_b/module_b/__init__.py b/tests/masonry/builders/fixtures/split_source/lib_b/module_b/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/split_source/pyproject.toml b/tests/masonry/builders/fixtures/split_source/pyproject.toml new file mode 100644 index 0000000..f11bba0 --- /dev/null +++ b/tests/masonry/builders/fixtures/split_source/pyproject.toml @@ -0,0 +1,15 @@ +[tool.poetry] +name = "split-source" +version = "0.1" +description = "Combine packages from different locations." 
+authors = [ + "Jan Harkes " +] +license = "MIT" +packages = [ + { include = "module_a", from = "lib_a" }, + { include = "module_b", from = "lib_b" }, +] + +[tool.poetry.dependencies] +python = "^3.6" diff --git a/tests/masonry/builders/fixtures/src_extended/README.rst b/tests/masonry/builders/fixtures/src_extended/README.rst new file mode 100644 index 0000000..a7508bd --- /dev/null +++ b/tests/masonry/builders/fixtures/src_extended/README.rst @@ -0,0 +1,2 @@ +Module 1 +======== diff --git a/tests/masonry/builders/fixtures/src_extended/build.py b/tests/masonry/builders/fixtures/src_extended/build.py new file mode 100644 index 0000000..78eca4f --- /dev/null +++ b/tests/masonry/builders/fixtures/src_extended/build.py @@ -0,0 +1,8 @@ +from setuptools import Extension + + +extensions = [Extension("extended.extended", ["src/extended/extended.c"])] + + +def build(setup_kwargs): + setup_kwargs.update({"ext_modules": extensions}) diff --git a/tests/masonry/builders/fixtures/src_extended/pyproject.toml b/tests/masonry/builders/fixtures/src_extended/pyproject.toml new file mode 100644 index 0000000..f8e8bd8 --- /dev/null +++ b/tests/masonry/builders/fixtures/src_extended/pyproject.toml @@ -0,0 +1,14 @@ +[tool.poetry] +name = "extended" +version = "0.1" +description = "Some description." +authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" + +build = "build.py" diff --git a/tests/masonry/builders/fixtures/src_extended/setup.py b/tests/masonry/builders/fixtures/src_extended/setup.py new file mode 100644 index 0000000..cd4c43c --- /dev/null +++ b/tests/masonry/builders/fixtures/src_extended/setup.py @@ -0,0 +1,27 @@ +from setuptools import setup + +package_dir = {"": "src"} + +packages = ["extended"] + +package_data = {"": ["*"]} + +setup_kwargs = { + "name": "extended", + "version": "0.1", + "description": "Some description.", + "long_description": "Module 1\n========\n", + "author": "Sébastien Eustace", + "author_email": "sebastien@eustace.io", + "maintainer": "None", + "maintainer_email": "None", + "url": "https://python-poetry.org/", + "package_dir": package_dir, + "packages": packages, + "package_data": package_data, +} +from build import * + +build(setup_kwargs) + +setup(**setup_kwargs) diff --git a/tests/masonry/builders/fixtures/src_extended/src/extended/__init__.py b/tests/masonry/builders/fixtures/src_extended/src/extended/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/src_extended/src/extended/extended.c b/tests/masonry/builders/fixtures/src_extended/src/extended/extended.c new file mode 100644 index 0000000..25a028e --- /dev/null +++ b/tests/masonry/builders/fixtures/src_extended/src/extended/extended.c @@ -0,0 +1,58 @@ +#include + + +static PyObject *hello(PyObject *self) { + return PyUnicode_FromString("Hello"); +} + + +static PyMethodDef module_methods[] = { + { + "hello", + (PyCFunction) hello, + NULL, + PyDoc_STR("Say hello.") + }, + {NULL} +}; + +#if PY_MAJOR_VERSION >= 3 +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "extended", + NULL, + -1, + module_methods, + NULL, + NULL, + NULL, + NULL, +}; +#endif + +PyMODINIT_FUNC +#if PY_MAJOR_VERSION >= 3 +PyInit_extended(void) +#else +init_extended(void) +#endif +{ + PyObject *module; + +#if PY_MAJOR_VERSION >= 3 + module = PyModule_Create(&moduledef); +#else + module = Py_InitModule3("extended", module_methods, NULL); +#endif + + if (module == NULL) +#if PY_MAJOR_VERSION >= 3 + return NULL; 
+#else + return; +#endif + +#if PY_MAJOR_VERSION >= 3 + return module; +#endif +} diff --git a/tests/masonry/builders/fixtures/with-include/LICENSE b/tests/masonry/builders/fixtures/with-include/LICENSE new file mode 100644 index 0000000..44cf2b3 --- /dev/null +++ b/tests/masonry/builders/fixtures/with-include/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2018 Sébastien Eustace + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/tests/masonry/builders/fixtures/with-include/README.rst b/tests/masonry/builders/fixtures/with-include/README.rst new file mode 100644 index 0000000..f7fe154 --- /dev/null +++ b/tests/masonry/builders/fixtures/with-include/README.rst @@ -0,0 +1,2 @@ +My Package +========== diff --git a/tests/masonry/builders/fixtures/with-include/extra_dir/README.md b/tests/masonry/builders/fixtures/with-include/extra_dir/README.md new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with-include/extra_dir/__init__.py b/tests/masonry/builders/fixtures/with-include/extra_dir/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with-include/extra_dir/sub_pkg/__init__.py b/tests/masonry/builders/fixtures/with-include/extra_dir/sub_pkg/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with-include/extra_dir/sub_pkg/vcs_excluded.txt b/tests/masonry/builders/fixtures/with-include/extra_dir/sub_pkg/vcs_excluded.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with-include/extra_dir/vcs_excluded.txt b/tests/masonry/builders/fixtures/with-include/extra_dir/vcs_excluded.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with-include/for_wheel_only/__init__.py b/tests/masonry/builders/fixtures/with-include/for_wheel_only/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with-include/my_module.py b/tests/masonry/builders/fixtures/with-include/my_module.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with-include/notes.txt b/tests/masonry/builders/fixtures/with-include/notes.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with-include/package_with_include/__init__.py b/tests/masonry/builders/fixtures/with-include/package_with_include/__init__.py new file mode 100644 index 0000000..10aa336 --- /dev/null +++ 
b/tests/masonry/builders/fixtures/with-include/package_with_include/__init__.py @@ -0,0 +1 @@ +__version__ = "1.2.3" diff --git a/tests/masonry/builders/fixtures/with-include/pyproject.toml b/tests/masonry/builders/fixtures/with-include/pyproject.toml new file mode 100644 index 0000000..07f5f4c --- /dev/null +++ b/tests/masonry/builders/fixtures/with-include/pyproject.toml @@ -0,0 +1,55 @@ +[tool.poetry] +name = "with-include" +version = "1.2.3" +description = "Some description." +authors = [ + "Sébastien Eustace " +] +license = "MIT" + +readme = "README.rst" + +homepage = "https://python-poetry.org/" +repository = "https://github.com/python-poetry/poetry" +documentation = "https://python-poetry.org/docs" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +packages = [ + { include = "extra_dir/**/*.py" }, + { include = "extra_dir/**/*.py" }, + { include = "my_module.py" }, + { include = "package_with_include" }, + { include = "tests", format = "sdist" }, + { include = "for_wheel_only", format = ["wheel"] }, + { include = "src_package", from = "src"}, +] + +include = [ + "extra_dir/vcs_excluded.txt", + "notes.txt" +] + + +# Requirements +[tool.poetry.dependencies] +python = "^3.6" +cleo = "^0.6" +cachy = { version = "^0.2.0", extras = ["msgpack"] } + +pendulum = { version = "^1.4", optional = true } + +[tool.poetry.dev-dependencies] +pytest = "~3.4" + +[tool.poetry.extras] +time = ["pendulum"] + +[tool.poetry.scripts] +my-script = "my_package:main" +my-2nd-script = "my_package:main2" diff --git a/tests/masonry/builders/fixtures/with-include/src/src_package/__init__.py b/tests/masonry/builders/fixtures/with-include/src/src_package/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with-include/tests/__init__.py b/tests/masonry/builders/fixtures/with-include/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with_bad_path_dep/pyproject.toml b/tests/masonry/builders/fixtures/with_bad_path_dep/pyproject.toml new file mode 100644 index 0000000..3c7edf1 --- /dev/null +++ b/tests/masonry/builders/fixtures/with_bad_path_dep/pyproject.toml @@ -0,0 +1,9 @@ +[tool.poetry] +name = "with_bad_path_dep" +version = "1.2.3" +description = "Some description." +authors = ["Awesome Hacker "] + +[tool.poetry.dependencies] +python = "^3.6" +bogus = { path = "../only/in/dev", develop = true } diff --git a/tests/masonry/builders/fixtures/with_bad_path_dep/with_bad_path_dep/__init__.py b/tests/masonry/builders/fixtures/with_bad_path_dep/with_bad_path_dep/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with_bad_path_dev_dep/pyproject.toml b/tests/masonry/builders/fixtures/with_bad_path_dev_dep/pyproject.toml new file mode 100644 index 0000000..921d93a --- /dev/null +++ b/tests/masonry/builders/fixtures/with_bad_path_dev_dep/pyproject.toml @@ -0,0 +1,11 @@ +[tool.poetry] +name = "with_bad_path_dev_dep" +version = "1.2.3" +description = "Some description." 
+authors = ["Awesome Hacker "] + +[tool.poetry.dependencies] +python = "^3.6" + +[tool.poetry.dev-dependencies] +bogus = { path = "../only/in/dev", develop = true } diff --git a/tests/masonry/builders/fixtures/with_bad_path_dev_dep/with_bad_path_dev_dep/__init__.py b/tests/masonry/builders/fixtures/with_bad_path_dev_dep/with_bad_path_dev_dep/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with_include_inline_table/both.txt b/tests/masonry/builders/fixtures/with_include_inline_table/both.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with_include_inline_table/pyproject.toml b/tests/masonry/builders/fixtures/with_include_inline_table/pyproject.toml new file mode 100644 index 0000000..5309992 --- /dev/null +++ b/tests/masonry/builders/fixtures/with_include_inline_table/pyproject.toml @@ -0,0 +1,48 @@ +[tool.poetry] +name = "with-include" +version = "1.2.3" +description = "Some description." +authors = [ + "Sébastien Eustace " +] +license = "MIT" + +homepage = "https://python-poetry.org/" +repository = "https://github.com/python-poetry/poetry" +documentation = "https://python-poetry.org/docs" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +packages = [ + { include = "src_package", from = "src"}, +] + +include = [ + { path = "tests", format = "sdist" }, + { path = "both.txt" }, + { path = "wheel_only.txt", format = "wheel" } +] + + +# Requirements +[tool.poetry.dependencies] +python = "^3.6" +cleo = "^0.6" +cachy = { version = "^0.2.0", extras = ["msgpack"] } + +pendulum = { version = "^1.4", optional = true } + +[tool.poetry.dev-dependencies] +pytest = "~3.4" + +[tool.poetry.extras] +time = ["pendulum"] + +[tool.poetry.scripts] +my-script = "my_package:main" +my-2nd-script = "my_package:main2" diff --git a/tests/masonry/builders/fixtures/with_include_inline_table/src/src_package/__init__.py b/tests/masonry/builders/fixtures/with_include_inline_table/src/src_package/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with_include_inline_table/tests/__init__.py b/tests/masonry/builders/fixtures/with_include_inline_table/tests/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with_include_inline_table/tests/test_foo/test.py b/tests/masonry/builders/fixtures/with_include_inline_table/tests/test_foo/test.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with_include_inline_table/wheel_only.txt b/tests/masonry/builders/fixtures/with_include_inline_table/wheel_only.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with_url_dependency/pyproject.toml b/tests/masonry/builders/fixtures/with_url_dependency/pyproject.toml new file mode 100644 index 0000000..c78f7ae --- /dev/null +++ b/tests/masonry/builders/fixtures/with_url_dependency/pyproject.toml @@ -0,0 +1,24 @@ +[tool.poetry] +name = "with-url-dependency" +version = "1.2.3" +description = "Some description." 
+authors = [ + "Sébastien Eustace " +] +license = "MIT" + +homepage = "https://python-poetry.org/" +repository = "https://github.com/python-poetry/poetry" +documentation = "https://python-poetry.org/docs" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +# Requirements +[tool.poetry.dependencies] +python = "^3.6" +demo = { url = "https://python-poetry.org/distributions/demo-0.1.0-py2.py3-none-any.whl" } diff --git a/tests/masonry/builders/fixtures/with_url_dependency/with_url_dependency/__init__.py b/tests/masonry/builders/fixtures/with_url_dependency/with_url_dependency/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with_vcs_dependency/pyproject.toml b/tests/masonry/builders/fixtures/with_vcs_dependency/pyproject.toml new file mode 100644 index 0000000..6fb808f --- /dev/null +++ b/tests/masonry/builders/fixtures/with_vcs_dependency/pyproject.toml @@ -0,0 +1,24 @@ +[tool.poetry] +name = "with-vcs-dependency" +version = "1.2.3" +description = "Some description." +authors = [ + "Sébastien Eustace " +] +license = "MIT" + +homepage = "https://python-poetry.org/" +repository = "https://github.com/python-poetry/poetry" +documentation = "https://python-poetry.org/docs" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +# Requirements +[tool.poetry.dependencies] +python = "^3.6" +cleo = { git = "https://github.com/sdispater/cleo.git", branch = "master" } diff --git a/tests/masonry/builders/fixtures/with_vcs_dependency/with_vcs_dependency/__init__.py b/tests/masonry/builders/fixtures/with_vcs_dependency/with_vcs_dependency/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with_wildcard_dependency_constraint/my_package/__init__.py b/tests/masonry/builders/fixtures/with_wildcard_dependency_constraint/my_package/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/builders/fixtures/with_wildcard_dependency_constraint/pyproject.toml b/tests/masonry/builders/fixtures/with_wildcard_dependency_constraint/pyproject.toml new file mode 100644 index 0000000..6626840 --- /dev/null +++ b/tests/masonry/builders/fixtures/with_wildcard_dependency_constraint/pyproject.toml @@ -0,0 +1,11 @@ +[tool.poetry] +name = "my-package" +version = "1.2.3" +description = "Some description." 
+authors = [ + "People Everywhere " +] + +[tool.poetry.dependencies] +python = "^3.10" +google-api-python-client = ">=1.8,!=2.0.*" diff --git a/tests/masonry/builders/test_builder.py b/tests/masonry/builders/test_builder.py new file mode 100644 index 0000000..5e3fd6e --- /dev/null +++ b/tests/masonry/builders/test_builder.py @@ -0,0 +1,302 @@ +from __future__ import annotations + +import sys + +from email.parser import Parser +from pathlib import Path +from typing import TYPE_CHECKING + +import pytest + +from poetry.core.factory import Factory +from poetry.core.masonry.builders.builder import Builder + + +if TYPE_CHECKING: + from pytest_mock import MockerFixture + + +def test_builder_find_excluded_files(mocker: MockerFixture) -> None: + p = mocker.patch("poetry.core.vcs.git.Git.get_ignored_files") + p.return_value = [] + + builder = Builder( + Factory().create_poetry(Path(__file__).parent / "fixtures" / "complete") + ) + + assert builder.find_excluded_files() == {"my_package/sub_pkg1/extra_file.xml"} + + +@pytest.mark.xfail( + sys.platform == "win32", + reason="Windows is case insensitive for the most part", +) +def test_builder_find_case_sensitive_excluded_files(mocker: MockerFixture) -> None: + p = mocker.patch("poetry.core.vcs.git.Git.get_ignored_files") + p.return_value = [] + + builder = Builder( + Factory().create_poetry( + Path(__file__).parent / "fixtures" / "case_sensitive_exclusions" + ) + ) + + assert builder.find_excluded_files() == { + "my_package/FooBar/Bar.py", + "my_package/FooBar/lowercasebar.py", + "my_package/Foo/SecondBar.py", + "my_package/Foo/Bar.py", + "my_package/Foo/lowercasebar.py", + "my_package/bar/foo.py", + "my_package/bar/CapitalFoo.py", + } + + +@pytest.mark.xfail( + sys.platform == "win32", + reason="Windows is case insensitive for the most part", +) +def test_builder_find_invalid_case_sensitive_excluded_files( + mocker: MockerFixture, +) -> None: + p = mocker.patch("poetry.core.vcs.git.Git.get_ignored_files") + p.return_value = [] + + builder = Builder( + Factory().create_poetry( + Path(__file__).parent / "fixtures" / "invalid_case_sensitive_exclusions" + ) + ) + + assert {"my_package/Bar/foo/bar/Foo.py"} == builder.find_excluded_files() + + +def test_get_metadata_content() -> None: + builder = Builder( + Factory().create_poetry(Path(__file__).parent / "fixtures" / "complete") + ) + + metadata = builder.get_metadata_content() + + p = Parser() + parsed = p.parsestr(metadata) + + assert parsed["Metadata-Version"] == "2.1" + assert parsed["Name"] == "my-package" + assert parsed["Version"] == "1.2.3" + assert parsed["Summary"] == "Some description." 
+ assert parsed["Author"] == "Sébastien Eustace" + assert parsed["Author-email"] == "sebastien@eustace.io" + assert parsed["Keywords"] == "packaging,dependency,poetry" + assert parsed["Requires-Python"] == ">=3.6,<4.0" + assert parsed["License"] == "MIT" + assert parsed["Home-page"] == "https://python-poetry.org/" + + classifiers = parsed.get_all("Classifier") + assert classifiers == [ + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules", + ] + + extras = parsed.get_all("Provides-Extra") + assert extras == ["time"] + + requires = parsed.get_all("Requires-Dist") + assert requires == [ + "cachy[msgpack] (>=0.2.0,<0.3.0)", + "cleo (>=0.6,<0.7)", + ( + 'pendulum (>=1.4,<2.0) ; (python_version ~= "2.7" and sys_platform ==' + ' "win32" or python_version in "3.4 3.5") and (extra == "time")' + ), + ] + + urls = parsed.get_all("Project-URL") + assert urls == [ + "Documentation, https://python-poetry.org/docs", + "Issue Tracker, https://github.com/python-poetry/poetry/issues", + "Repository, https://github.com/python-poetry/poetry", + ] + + +def test_metadata_homepage_default() -> None: + builder = Builder( + Factory().create_poetry(Path(__file__).parent / "fixtures" / "simple_version") + ) + + metadata = Parser().parsestr(builder.get_metadata_content()) + + assert metadata["Home-page"] is None + + +def test_metadata_with_vcs_dependencies() -> None: + builder = Builder( + Factory().create_poetry( + Path(__file__).parent / "fixtures" / "with_vcs_dependency" + ) + ) + + metadata = Parser().parsestr(builder.get_metadata_content()) + + requires_dist = metadata["Requires-Dist"] + + assert requires_dist == "cleo @ git+https://github.com/sdispater/cleo.git@master" + + +def test_metadata_with_url_dependencies() -> None: + builder = Builder( + Factory().create_poetry( + Path(__file__).parent / "fixtures" / "with_url_dependency" + ) + ) + + metadata = Parser().parsestr(builder.get_metadata_content()) + + requires_dist = metadata["Requires-Dist"] + + assert ( + requires_dist + == "demo @" + " https://python-poetry.org/distributions/demo-0.1.0-py2.py3-none-any.whl" + ) + + +def test_missing_script_files_throws_error() -> None: + builder = Builder( + Factory().create_poetry( + Path(__file__).parent / "fixtures" / "script_reference_file_missing" + ) + ) + + with pytest.raises(RuntimeError) as err: + builder.convert_script_files() + + assert "is not found." 
in err.value.args[0] + + +def test_invalid_script_files_definition() -> None: + with pytest.raises(RuntimeError) as err: + Builder( + Factory().create_poetry( + Path(__file__).parent + / "fixtures" + / "script_reference_file_invalid_definition" + ) + ) + + assert "configuration is invalid" in err.value.args[0] + assert "[scripts.invalid_definition]" in err.value.args[0] + + +@pytest.mark.parametrize( + "fixture", + [ + "script_callable_legacy_table", + ], +) +def test_entrypoint_scripts_legacy_warns(fixture: str) -> None: + with pytest.warns(DeprecationWarning): + Builder( + Factory().create_poetry(Path(__file__).parent / "fixtures" / fixture) + ).convert_entry_points() + + +@pytest.mark.parametrize( + "fixture, result", + [ + ( + "script_callable_legacy_table", + { + "console_scripts": [ + "extra-script-legacy = my_package.extra_legacy:main", + "script-legacy = my_package.extra_legacy:main", + ] + }, + ), + ( + "script_callable_legacy_string", + {"console_scripts": ["script-legacy = my_package:main"]}, + ), + ( + "script_reference_console", + { + "console_scripts": [ + "extra-script = my_package.extra:main[time]", + "script = my_package.extra:main", + ] + }, + ), + ( + "script_reference_file", + {}, + ), + ], +) +@pytest.mark.filterwarnings("ignore::DeprecationWarning") +def test_builder_convert_entry_points( + fixture: str, result: dict[str, list[str]] +) -> None: + entry_points = Builder( + Factory().create_poetry(Path(__file__).parent / "fixtures" / fixture) + ).convert_entry_points() + assert entry_points == result + + +@pytest.mark.parametrize( + "fixture, result", + [ + ( + "script_callable_legacy_table", + [], + ), + ( + "script_callable_legacy_string", + [], + ), + ( + "script_reference_console", + [], + ), + ( + "script_reference_file", + [Path("bin") / "script.sh"], + ), + ], +) +def test_builder_convert_script_files(fixture: str, result: list[Path]) -> None: + project_root = Path(__file__).parent / "fixtures" / fixture + script_files = Builder(Factory().create_poetry(project_root)).convert_script_files() + assert [p.relative_to(project_root) for p in script_files] == result + + +def test_metadata_with_readme_files() -> None: + test_path = Path(__file__).parent.parent.parent / "fixtures" / "with_readme_files" + builder = Builder(Factory().create_poetry(test_path)) + + metadata = Parser().parsestr(builder.get_metadata_content()) + + readme1 = test_path / "README-1.rst" + readme2 = test_path / "README-2.rst" + description = "\n".join([readme1.read_text(), readme2.read_text(), ""]) + + assert metadata.get_payload() == description + + +def test_metadata_with_wildcard_dependency_constraint() -> None: + test_path = ( + Path(__file__).parent / "fixtures" / "with_wildcard_dependency_constraint" + ) + builder = Builder(Factory().create_poetry(test_path)) + + metadata = Parser().parsestr(builder.get_metadata_content()) + + requires = metadata.get_all("Requires-Dist") + assert requires == ["google-api-python-client (>=1.8,!=2.0.*)"] diff --git a/tests/masonry/builders/test_complete.py b/tests/masonry/builders/test_complete.py new file mode 100644 index 0000000..c93ec38 --- /dev/null +++ b/tests/masonry/builders/test_complete.py @@ -0,0 +1,629 @@ +from __future__ import annotations + +import ast +import os +import platform +import re +import shutil +import sys +import tarfile +import tempfile +import zipfile + +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Any +from typing import Iterator + +import pytest + +from poetry.core import __version__ +from 
poetry.core.factory import Factory +from poetry.core.masonry.builder import Builder + + +if TYPE_CHECKING: + from pytest_mock import MockerFixture + +fixtures_dir = Path(__file__).parent / "fixtures" + + +@pytest.fixture(autouse=True) +def setup() -> Iterator[None]: + clear_samples_dist() + + yield + + clear_samples_dist() + + +def clear_samples_dist() -> None: + for dist in fixtures_dir.glob("**/dist"): + if dist.is_dir(): + shutil.rmtree(str(dist)) + + +@pytest.mark.skipif( + sys.platform == "win32" + and sys.version_info <= (3, 6) + or platform.python_implementation().lower() == "pypy", + reason="Disable test on Windows for Python <=3.6 and for PyPy", +) +def test_wheel_c_extension() -> None: + module_path = fixtures_dir / "extended" + builder = Builder(Factory().create_poetry(module_path)) + builder.build(fmt="all") + + sdist = fixtures_dir / "extended" / "dist" / "extended-0.1.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + assert "extended-0.1/build.py" in tar.getnames() + assert "extended-0.1/extended/extended.c" in tar.getnames() + + whl = list((module_path / "dist").glob("extended-0.1-cp*-cp*-*.whl"))[0] + + assert whl.exists() + + zip = zipfile.ZipFile(str(whl)) + + has_compiled_extension = False + for name in zip.namelist(): + if name.startswith("extended/extended") and name.endswith((".so", ".pyd")): + has_compiled_extension = True + + assert has_compiled_extension + + try: + wheel_data = zip.read("extended-0.1.dist-info/WHEEL").decode() + + assert ( + re.match( + f"""(?m)^\ +Wheel-Version: 1.0 +Generator: poetry-core {__version__} +Root-Is-Purelib: false +Tag: cp[23]_?\\d+-cp[23]_?\\d+m?u?-.+ +$""", + wheel_data, + ) + is not None + ) + + records = zip.read("extended-0.1.dist-info/RECORD").decode() + + assert re.search(r"\s+extended/extended.*\.(so|pyd)", records) is not None + finally: + zip.close() + + +@pytest.mark.skipif( + sys.platform == "win32" + and sys.version_info <= (3, 6) + or platform.python_implementation().lower() == "pypy", + reason="Disable test on Windows for Python <=3.6 and for PyPy", +) +def test_wheel_c_extension_with_no_setup() -> None: + module_path = fixtures_dir / "extended_with_no_setup" + builder = Builder(Factory().create_poetry(module_path)) + builder.build(fmt="all") + + sdist = fixtures_dir / "extended_with_no_setup" / "dist" / "extended-0.1.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + assert "extended-0.1/build.py" in tar.getnames() + assert "extended-0.1/extended/extended.c" in tar.getnames() + + whl = list((module_path / "dist").glob("extended-0.1-cp*-cp*-*.whl"))[0] + + assert whl.exists() + + zip = zipfile.ZipFile(str(whl)) + + has_compiled_extension = False + for name in zip.namelist(): + if name.startswith("extended/extended") and name.endswith((".so", ".pyd")): + has_compiled_extension = True + + assert has_compiled_extension + + try: + wheel_data = zip.read("extended-0.1.dist-info/WHEEL").decode() + + assert ( + re.match( + f"""(?m)^\ +Wheel-Version: 1.0 +Generator: poetry-core {__version__} +Root-Is-Purelib: false +Tag: cp[23]_?\\d+-cp[23]_?\\d+m?u?-.+ +$""", + wheel_data, + ) + is not None + ) + + records = zip.read("extended-0.1.dist-info/RECORD").decode() + + assert re.search(r"\s+extended/extended.*\.(so|pyd)", records) is not None + finally: + zip.close() + + +@pytest.mark.skipif( + sys.platform == "win32" + and sys.version_info <= (3, 6) + or platform.python_implementation().lower() == "pypy", + reason="Disable test on Windows for Python <=3.6 and for PyPy", 
+) +def test_wheel_c_extension_src_layout() -> None: + module_path = fixtures_dir / "src_extended" + builder = Builder(Factory().create_poetry(module_path)) + builder.build(fmt="all") + + sdist = fixtures_dir / "src_extended" / "dist" / "extended-0.1.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + assert "extended-0.1/build.py" in tar.getnames() + assert "extended-0.1/src/extended/extended.c" in tar.getnames() + + whl = list((module_path / "dist").glob("extended-0.1-cp*-cp*-*.whl"))[0] + + assert whl.exists() + + zip = zipfile.ZipFile(str(whl)) + + has_compiled_extension = False + for name in zip.namelist(): + if name.startswith("extended/extended") and name.endswith((".so", ".pyd")): + has_compiled_extension = True + + assert has_compiled_extension + + try: + wheel_data = zip.read("extended-0.1.dist-info/WHEEL").decode() + + assert ( + re.match( + f"""(?m)^\ +Wheel-Version: 1.0 +Generator: poetry-core {__version__} +Root-Is-Purelib: false +Tag: cp[23]_?\\d+-cp[23]_?\\d+m?u?-.+ +$""", + wheel_data, + ) + is not None + ) + + records = zip.read("extended-0.1.dist-info/RECORD").decode() + + assert re.search(r"\s+extended/extended.*\.(so|pyd)", records) is not None + finally: + zip.close() + + +def test_complete() -> None: + module_path = fixtures_dir / "complete" + builder = Builder(Factory().create_poetry(module_path)) + builder.build(fmt="all") + + whl = module_path / "dist" / "my_package-1.2.3-py3-none-any.whl" + + assert whl.exists() + if sys.platform != "win32": + assert (os.stat(str(whl)).st_mode & 0o777) == 0o644 + + zip = zipfile.ZipFile(str(whl)) + + try: + assert "my_package/sub_pgk1/extra_file.xml" not in zip.namelist() + assert "my-package-1.2.3.data/scripts/script.sh" in zip.namelist() + assert ( + "Hello World" + in zip.read("my-package-1.2.3.data/scripts/script.sh").decode() + ) + + entry_points = zip.read("my_package-1.2.3.dist-info/entry_points.txt") + + assert ( + entry_points.decode() + == """\ +[console_scripts] +extra-script=my_package.extra:main[time] +my-2nd-script=my_package:main2 +my-script=my_package:main + +""" + ) + wheel_data = zip.read("my_package-1.2.3.dist-info/WHEEL").decode() + + assert ( + wheel_data + == f"""\ +Wheel-Version: 1.0 +Generator: poetry-core {__version__} +Root-Is-Purelib: true +Tag: py3-none-any +""" + ) + wheel_data = zip.read("my_package-1.2.3.dist-info/METADATA").decode() + + assert ( + wheel_data + == """\ +Metadata-Version: 2.1 +Name: my-package +Version: 1.2.3 +Summary: Some description. 
+Home-page: https://python-poetry.org/ +License: MIT +Keywords: packaging,dependency,poetry +Author: Sébastien Eustace +Author-email: sebastien@eustace.io +Maintainer: People Everywhere +Maintainer-email: people@everywhere.com +Requires-Python: >=3.6,<4.0 +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Software Development :: Build Tools +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Provides-Extra: time +Requires-Dist: cachy[msgpack] (>=0.2.0,<0.3.0) +Requires-Dist: cleo (>=0.6,<0.7) +Requires-Dist: pendulum (>=1.4,<2.0) ; (python_version ~= "2.7" and sys_platform == "win32" or python_version in "3.4 3.5") and (extra == "time") +Project-URL: Documentation, https://python-poetry.org/docs +Project-URL: Issue Tracker, https://github.com/python-poetry/poetry/issues +Project-URL: Repository, https://github.com/python-poetry/poetry +Description-Content-Type: text/x-rst + +My Package +========== + +""" + ) + actual_records = zip.read("my_package-1.2.3.dist-info/RECORD").decode() + + # For some reason, the ordering of the files and the SHA hashes + # vary per operating systems and Python versions. + # So instead of 1:1 assertion, let's do a bit clunkier one: + + expected_records = [ + "my_package/__init__.py", + "my_package/data1/test.json", + "my_package/sub_pkg1/__init__.py", + "my_package/sub_pkg2/__init__.py", + "my_package/sub_pkg2/data2/data.json", + "my_package-1.2.3.dist-info/entry_points.txt", + "my_package-1.2.3.dist-info/LICENSE", + "my_package-1.2.3.dist-info/WHEEL", + "my_package-1.2.3.dist-info/METADATA", + ] + + for expected_record in expected_records: + assert expected_record in actual_records + + finally: + zip.close() + + +def test_complete_no_vcs() -> None: + # Copy the complete fixtures dir to a temporary directory + module_path = fixtures_dir / "complete" + temporary_dir = Path(tempfile.mkdtemp()) / "complete" + + shutil.copytree(module_path.as_posix(), temporary_dir.as_posix()) + + builder = Builder(Factory().create_poetry(temporary_dir)) + builder.build(fmt="all") + + whl = temporary_dir / "dist" / "my_package-1.2.3-py3-none-any.whl" + + assert whl.exists() + + zip = zipfile.ZipFile(str(whl)) + + # Check the zipped file to be sure that included and excluded files are + # correctly taken account of without vcs + expected_name_list = [ + "my_package/__init__.py", + "my_package/data1/test.json", + "my_package/sub_pkg1/__init__.py", + "my_package/sub_pkg2/__init__.py", + "my_package/sub_pkg2/data2/data.json", + "my-package-1.2.3.data/scripts/script.sh", + "my_package/sub_pkg3/foo.py", + "my_package-1.2.3.dist-info/entry_points.txt", + "my_package-1.2.3.dist-info/LICENSE", + "my_package-1.2.3.dist-info/WHEEL", + "my_package-1.2.3.dist-info/METADATA", + "my_package-1.2.3.dist-info/RECORD", + ] + + assert sorted(zip.namelist()) == sorted(expected_name_list) + + try: + entry_points = zip.read("my_package-1.2.3.dist-info/entry_points.txt") + + assert ( + entry_points.decode() + == """\ +[console_scripts] +extra-script=my_package.extra:main[time] +my-2nd-script=my_package:main2 +my-script=my_package:main + +""" + ) + wheel_data = 
zip.read("my_package-1.2.3.dist-info/WHEEL").decode() + + assert ( + wheel_data + == f"""\ +Wheel-Version: 1.0 +Generator: poetry-core {__version__} +Root-Is-Purelib: true +Tag: py3-none-any +""" + ) + wheel_data = zip.read("my_package-1.2.3.dist-info/METADATA").decode() + + assert ( + wheel_data + == """\ +Metadata-Version: 2.1 +Name: my-package +Version: 1.2.3 +Summary: Some description. +Home-page: https://python-poetry.org/ +License: MIT +Keywords: packaging,dependency,poetry +Author: Sébastien Eustace +Author-email: sebastien@eustace.io +Maintainer: People Everywhere +Maintainer-email: people@everywhere.com +Requires-Python: >=3.6,<4.0 +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Software Development :: Build Tools +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Provides-Extra: time +Requires-Dist: cachy[msgpack] (>=0.2.0,<0.3.0) +Requires-Dist: cleo (>=0.6,<0.7) +Requires-Dist: pendulum (>=1.4,<2.0) ; (python_version ~= "2.7" and sys_platform == "win32" or python_version in "3.4 3.5") and (extra == "time") +Project-URL: Documentation, https://python-poetry.org/docs +Project-URL: Issue Tracker, https://github.com/python-poetry/poetry/issues +Project-URL: Repository, https://github.com/python-poetry/poetry +Description-Content-Type: text/x-rst + +My Package +========== + +""" + ) + finally: + zip.close() + + +def test_module_src() -> None: + module_path = fixtures_dir / "source_file" + builder = Builder(Factory().create_poetry(module_path)) + builder.build(fmt="all") + + sdist = module_path / "dist" / "module_src-0.1.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + assert "module_src-0.1/src/module_src.py" in tar.getnames() + + whl = module_path / "dist" / "module_src-0.1-py2.py3-none-any.whl" + + assert whl.exists() + + zip = zipfile.ZipFile(str(whl)) + + try: + assert "module_src.py" in zip.namelist() + finally: + zip.close() + + +def test_package_src() -> None: + module_path = fixtures_dir / "source_package" + builder = Builder(Factory().create_poetry(module_path)) + builder.build(fmt="all") + + sdist = module_path / "dist" / "package_src-0.1.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + assert "package_src-0.1/src/package_src/module.py" in tar.getnames() + + whl = module_path / "dist" / "package_src-0.1-py2.py3-none-any.whl" + + assert whl.exists() + + zip = zipfile.ZipFile(str(whl)) + + try: + assert "package_src/__init__.py" in zip.namelist() + assert "package_src/module.py" in zip.namelist() + finally: + zip.close() + + +def test_split_source() -> None: + module_path = fixtures_dir / "split_source" + builder = Builder(Factory().create_poetry(module_path)) + builder.build(fmt="all") + + sdist = module_path / "dist" / "split_source-0.1.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + assert "split_source-0.1/lib_a/module_a/__init__.py" in tar.getnames() + assert "split_source-0.1/lib_b/module_b/__init__.py" in tar.getnames() + + whl = module_path / "dist" / "split_source-0.1-py3-none-any.whl" + + assert whl.exists() + + zip = zipfile.ZipFile(str(whl)) + + try: + assert 
"module_a/__init__.py" in zip.namelist() + assert "module_b/__init__.py" in zip.namelist() + finally: + zip.close() + + +def test_package_with_include(mocker: MockerFixture) -> None: + module_path = fixtures_dir / "with-include" + + # Patch git module to return specific excluded files + p = mocker.patch("poetry.core.vcs.git.Git.get_ignored_files") + p.return_value = [ + str( + Path(__file__).parent + / "fixtures" + / "with-include" + / "extra_dir" + / "vcs_excluded.txt" + ), + str( + Path(__file__).parent + / "fixtures" + / "with-include" + / "extra_dir" + / "sub_pkg" + / "vcs_excluded.txt" + ), + ] + builder = Builder(Factory().create_poetry(module_path)) + builder.build(fmt="all") + + sdist = fixtures_dir / "with-include" / "dist" / "with_include-1.2.3.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + names = tar.getnames() + assert len(names) == len(set(names)) + assert "with_include-1.2.3/LICENSE" in names + assert "with_include-1.2.3/README.rst" in names + assert "with_include-1.2.3/extra_dir/__init__.py" in names + assert "with_include-1.2.3/extra_dir/vcs_excluded.txt" in names + assert "with_include-1.2.3/extra_dir/sub_pkg/__init__.py" in names + assert "with_include-1.2.3/extra_dir/sub_pkg/vcs_excluded.txt" not in names + assert "with_include-1.2.3/my_module.py" in names + assert "with_include-1.2.3/notes.txt" in names + assert "with_include-1.2.3/package_with_include/__init__.py" in names + assert "with_include-1.2.3/tests/__init__.py" in names + assert "with_include-1.2.3/pyproject.toml" in names + assert "with_include-1.2.3/setup.py" in names + assert "with_include-1.2.3/PKG-INFO" in names + assert "with_include-1.2.3/for_wheel_only/__init__.py" not in names + assert "with_include-1.2.3/src/src_package/__init__.py" in names + + file = tar.extractfile("with_include-1.2.3/setup.py") + assert file + setup = file.read() + setup_ast = ast.parse(setup) + + setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)] + ns: dict[str, Any] = {} + exec(compile(setup_ast, filename="setup.py", mode="exec"), ns) + assert ns["package_dir"] == {"": "src"} + assert ns["packages"] == [ + "extra_dir", + "extra_dir.sub_pkg", + "package_with_include", + "src_package", + "tests", + ] + assert ns["package_data"] == {"": ["*"]} + assert ns["modules"] == ["my_module"] + + whl = module_path / "dist" / "with_include-1.2.3-py3-none-any.whl" + + assert whl.exists() + + with zipfile.ZipFile(str(whl)) as z: + names = z.namelist() + assert len(names) == len(set(names)) + assert "with_include-1.2.3.dist-info/LICENSE" in names + assert "extra_dir/__init__.py" in names + assert "extra_dir/vcs_excluded.txt" in names + assert "extra_dir/sub_pkg/__init__.py" in names + assert "extra_dir/sub_pkg/vcs_excluded.txt" not in names + assert "for_wheel_only/__init__.py" in names + assert "my_module.py" in names + assert "notes.txt" in names + assert "package_with_include/__init__.py" in names + assert "tests/__init__.py" not in names + assert "src_package/__init__.py" in names + + +def test_respect_format_for_explicit_included_files() -> None: + module_path = fixtures_dir / "exclude-whl-include-sdist" + builder = Builder(Factory().create_poetry(module_path)) + builder.build(fmt="all") + + sdist = module_path / "dist" / "exclude_whl_include_sdist-0.1.0.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + names = tar.getnames() + assert ( + "exclude_whl_include_sdist-0.1.0/exclude_whl_include_sdist/__init__.py" + in names + ) + assert ( + 
"exclude_whl_include_sdist-0.1.0/exclude_whl_include_sdist/compiled/source.c" + in names + ) + assert ( + "exclude_whl_include_sdist-0.1.0/exclude_whl_include_sdist/compiled/source.h" + in names + ) + assert ( + "exclude_whl_include_sdist-0.1.0/exclude_whl_include_sdist/cython_code.pyx" + in names + ) + assert "exclude_whl_include_sdist-0.1.0/pyproject.toml" in names + assert "exclude_whl_include_sdist-0.1.0/setup.py" in names + assert "exclude_whl_include_sdist-0.1.0/PKG-INFO" in names + + whl = module_path / "dist" / "exclude_whl_include_sdist-0.1.0-py3-none-any.whl" + + assert whl.exists() + + with zipfile.ZipFile(str(whl)) as z: + names = z.namelist() + assert "exclude_whl_include_sdist/__init__.py" in names + assert "exclude_whl_include_sdist/compiled/source.c" not in names + assert "exclude_whl_include_sdist/compiled/source.h" not in names + assert "exclude_whl_include_sdist/cython_code.pyx" not in names + + pass diff --git a/tests/masonry/builders/test_sdist.py b/tests/masonry/builders/test_sdist.py new file mode 100644 index 0000000..09f545c --- /dev/null +++ b/tests/masonry/builders/test_sdist.py @@ -0,0 +1,670 @@ +from __future__ import annotations + +import ast +import gzip +import hashlib +import shutil +import tarfile + +from email.parser import Parser +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Any +from typing import Iterator + +import pytest + +from packaging.utils import canonicalize_name + +from poetry.core.factory import Factory +from poetry.core.masonry.builders.sdist import SdistBuilder +from poetry.core.masonry.utils.package_include import PackageInclude +from poetry.core.packages.dependency import Dependency +from poetry.core.packages.project_package import ProjectPackage +from poetry.core.packages.vcs_dependency import VCSDependency + + +if TYPE_CHECKING: + from pytest_mock import MockerFixture + +fixtures_dir = Path(__file__).parent / "fixtures" + + +@pytest.fixture(autouse=True) +def setup() -> Iterator[None]: + clear_samples_dist() + + yield + + clear_samples_dist() + + +def clear_samples_dist() -> None: + for dist in fixtures_dir.glob("**/dist"): + if dist.is_dir(): + shutil.rmtree(str(dist)) + + +def project(name: str) -> Path: + return Path(__file__).parent / "fixtures" / name + + +def test_convert_dependencies() -> None: + package = ProjectPackage("foo", "1.2.3") + result = SdistBuilder.convert_dependencies( + package, + [ + Dependency("A", "^1.0"), + Dependency("B", "~1.0"), + Dependency("C", "1.2.3"), + VCSDependency("D", "git", "https://github.com/sdispater/d.git"), + Dependency("E", "^1.0"), + Dependency("F", "^1.0,!=1.3"), + ], + ) + main = [ + "A>=1.0,<2.0", + "B>=1.0,<1.1", + "C==1.2.3", + "D @ git+https://github.com/sdispater/d.git", + "E>=1.0,<2.0", + "F>=1.0,<2.0,!=1.3", + ] + extras: dict[str, Any] = {} + + assert result == (main, extras) + + package = ProjectPackage("foo", "1.2.3") + package.extras = {canonicalize_name("bar"): [Dependency("A", "*")]} + + result = SdistBuilder.convert_dependencies( + package, + [ + Dependency("A", ">=1.2", optional=True), + Dependency("B", "~1.0"), + Dependency("C", "1.2.3"), + ], + ) + main = ["B>=1.0,<1.1", "C==1.2.3"] + extras = {"bar": ["A>=1.2"]} + + assert result == (main, extras) + + c = Dependency("C", "1.2.3") + c.python_versions = "~2.7 || ^3.6" + d = Dependency("D", "3.4.5", optional=True) + d.python_versions = "~2.7 || ^3.4" + + package.extras = {canonicalize_name("baz"): [Dependency("D", "*")]} + + result = SdistBuilder.convert_dependencies( + package, + 
[Dependency("A", ">=1.2", optional=True), Dependency("B", "~1.0"), c, d], + ) + main = ["B>=1.0,<1.1"] + + extra_python = ( + ':python_version >= "2.7" and python_version < "2.8" ' + 'or python_version >= "3.6" and python_version < "4.0"' + ) + extra_d_dependency = ( + 'baz:python_version >= "2.7" and python_version < "2.8" ' + 'or python_version >= "3.4" and python_version < "4.0"' + ) + extras = {extra_python: ["C==1.2.3"], extra_d_dependency: ["D==3.4.5"]} + + assert result == (main, extras) + + +def test_make_setup() -> None: + poetry = Factory().create_poetry(project("complete")) + + builder = SdistBuilder(poetry) + setup = builder.build_setup() + setup_ast = ast.parse(setup) + + setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)] + ns: dict[str, Any] = {} + exec(compile(setup_ast, filename="setup.py", mode="exec"), ns) + assert ns["packages"] == [ + "my_package", + "my_package.sub_pkg1", + "my_package.sub_pkg2", + "my_package.sub_pkg3", + ] + assert ns["install_requires"] == ["cachy[msgpack]>=0.2.0,<0.3.0", "cleo>=0.6,<0.7"] + assert ns["entry_points"] == { + "console_scripts": [ + "extra-script = my_package.extra:main[time]", + "my-2nd-script = my_package:main2", + "my-script = my_package:main", + ] + } + assert ns["scripts"] == [str(Path("bin") / "script.sh")] + assert ns["extras_require"] == { + 'time:python_version ~= "2.7" and sys_platform == "win32" or python_version in "3.4 3.5"': [ + "pendulum>=1.4,<2.0" + ] + } + + +def test_make_pkg_info(mocker: MockerFixture) -> None: + get_metadata_content = mocker.patch( + "poetry.core.masonry.builders.builder.Builder.get_metadata_content" + ) + poetry = Factory().create_poetry(project("complete")) + + builder = SdistBuilder(poetry) + builder.build_pkg_info() + + assert get_metadata_content.called + + +def test_make_pkg_info_any_python() -> None: + poetry = Factory().create_poetry(project("module1")) + + builder = SdistBuilder(poetry) + pkg_info = builder.build_pkg_info() + p = Parser() + parsed = p.parsestr(pkg_info.decode()) + + assert "Requires-Python" not in parsed + + +def test_find_files_to_add() -> None: + poetry = Factory().create_poetry(project("complete")) + + builder = SdistBuilder(poetry) + result = [f.relative_to_source_root() for f in builder.find_files_to_add()] + + assert sorted(result) == sorted( + [ + Path("LICENSE"), + Path("README.rst"), + Path("bin/script.sh"), + Path("my_package/__init__.py"), + Path("my_package/data1/test.json"), + Path("my_package/sub_pkg1/__init__.py"), + Path("my_package/sub_pkg2/__init__.py"), + Path("my_package/sub_pkg2/data2/data.json"), + Path("my_package/sub_pkg3/foo.py"), + Path("pyproject.toml"), + ] + ) + + +def test_make_pkg_info_multi_constraints_dependency() -> None: + poetry = Factory().create_poetry( + Path(__file__).parent.parent.parent + / "fixtures" + / "project_with_multi_constraints_dependency" + ) + + builder = SdistBuilder(poetry) + pkg_info = builder.build_pkg_info() + p = Parser() + parsed = p.parsestr(pkg_info.decode()) + + requires = parsed.get_all("Requires-Dist") + assert requires == [ + 'pendulum (>=1.5,<2.0) ; python_version < "3.4"', + 'pendulum (>=2.0,<3.0) ; python_version >= "3.4" and python_version < "4.0"', + ] + + +def test_find_packages() -> None: + poetry = Factory().create_poetry(project("complete")) + + builder = SdistBuilder(poetry) + + base = project("complete") + include = PackageInclude(base, "my_package") + + pkg_dir, packages, pkg_data = builder.find_packages(include) + + assert pkg_dir is None + assert packages == [ + 
"my_package", + "my_package.sub_pkg1", + "my_package.sub_pkg2", + "my_package.sub_pkg3", + ] + assert pkg_data == { + "": ["*"], + "my_package": ["data1/*"], + "my_package.sub_pkg2": ["data2/*"], + } + + poetry = Factory().create_poetry(project("source_package")) + + builder = SdistBuilder(poetry) + + base = project("source_package") + include = PackageInclude(base, "package_src", source="src") + + pkg_dir, packages, pkg_data = builder.find_packages(include) + + assert pkg_dir == str(base / "src") + assert packages == ["package_src"] + assert pkg_data == {"": ["*"]} + + +def test_package() -> None: + poetry = Factory().create_poetry(project("complete")) + + builder = SdistBuilder(poetry) + builder.build() + + sdist = fixtures_dir / "complete" / "dist" / "my_package-1.2.3.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + assert "my_package-1.2.3/LICENSE" in tar.getnames() + + +def test_sdist_reproducibility() -> None: + poetry = Factory().create_poetry(project("complete")) + + hashes = set() + + for _ in range(2): + builder = SdistBuilder(poetry) + builder.build() + + sdist = fixtures_dir / "complete" / "dist" / "my_package-1.2.3.tar.gz" + + assert sdist.exists() + + hashes.add(hashlib.sha256(sdist.read_bytes()).hexdigest()) + + assert len(hashes) == 1 + + +def test_setup_py_context() -> None: + poetry = Factory().create_poetry(project("complete")) + + builder = SdistBuilder(poetry) + + project_setup_py = poetry.file.parent / "setup.py" + + assert not project_setup_py.exists() + + try: + with builder.setup_py() as setup: + assert setup.exists() + assert project_setup_py == setup + + with open(str(setup), "rb") as f: + # we convert to string and replace line endings here for compatibility + data = f.read().decode().replace("\r\n", "\n") + assert data == builder.build_setup().decode() + + assert not project_setup_py.exists() + finally: + if project_setup_py.exists(): + project_setup_py.unlink() + + +def test_module() -> None: + poetry = Factory().create_poetry(project("module1")) + + builder = SdistBuilder(poetry) + builder.build() + + sdist = fixtures_dir / "module1" / "dist" / "module1-0.1.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + assert "module1-0.1/module1.py" in tar.getnames() + + +def test_prelease() -> None: + poetry = Factory().create_poetry(project("prerelease")) + + builder = SdistBuilder(poetry) + builder.build() + + sdist = fixtures_dir / "prerelease" / "dist" / "prerelease-0.1b1.tar.gz" + + assert sdist.exists() + + +@pytest.mark.parametrize("directory", ["extended", "extended_legacy_config"]) +def test_with_c_extensions(directory: str) -> None: + poetry = Factory().create_poetry(project("extended")) + + builder = SdistBuilder(poetry) + builder.build() + + sdist = fixtures_dir / "extended" / "dist" / "extended-0.1.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + assert "extended-0.1/build.py" in tar.getnames() + assert "extended-0.1/extended/extended.c" in tar.getnames() + + +def test_with_c_extensions_src_layout() -> None: + poetry = Factory().create_poetry(project("src_extended")) + + builder = SdistBuilder(poetry) + builder.build() + + sdist = fixtures_dir / "src_extended" / "dist" / "extended-0.1.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + assert "extended-0.1/build.py" in tar.getnames() + assert "extended-0.1/src/extended/extended.c" in tar.getnames() + + +def test_with_build_script_in_subdir() -> None: + poetry = 
Factory().create_poetry(project("build_script_in_subdir")) + + builder = SdistBuilder(poetry) + setup = builder.build_setup() + # should not error + ast.parse(setup) + + +def test_with_src_module_file() -> None: + poetry = Factory().create_poetry(project("source_file")) + + builder = SdistBuilder(poetry) + + # Check setup.py + setup = builder.build_setup() + setup_ast = ast.parse(setup) + + setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)] + ns: dict[str, Any] = {} + exec(compile(setup_ast, filename="setup.py", mode="exec"), ns) + assert ns["package_dir"] == {"": "src"} + assert ns["modules"] == ["module_src"] + + builder.build() + + sdist = fixtures_dir / "source_file" / "dist" / "module_src-0.1.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + assert "module_src-0.1/src/module_src.py" in tar.getnames() + + +def test_with_src_module_dir() -> None: + poetry = Factory().create_poetry(project("source_package")) + + builder = SdistBuilder(poetry) + + # Check setup.py + setup = builder.build_setup() + setup_ast = ast.parse(setup) + + setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)] + ns: dict[str, Any] = {} + exec(compile(setup_ast, filename="setup.py", mode="exec"), ns) + assert ns["package_dir"] == {"": "src"} + assert ns["packages"] == ["package_src"] + + builder.build() + + sdist = fixtures_dir / "source_package" / "dist" / "package_src-0.1.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + assert "package_src-0.1/src/package_src/__init__.py" in tar.getnames() + assert "package_src-0.1/src/package_src/module.py" in tar.getnames() + + +def test_default_with_excluded_data(mocker: MockerFixture) -> None: + class MockGit: + def get_ignored_files(self, folder: Path | None = None) -> list[str]: + # Patch git module to return specific excluded files + return [ + ( + ( + Path(__file__).parent + / "fixtures" + / "default_with_excluded_data" + / "my_package" + / "data" + / "sub_data" + / "data2.txt" + ) + .relative_to(project("default_with_excluded_data")) + .as_posix() + ) + ] + + p = mocker.patch("poetry.core.vcs.get_vcs") + p.return_value = MockGit() + poetry = Factory().create_poetry(project("default_with_excluded_data")) + + builder = SdistBuilder(poetry) + + # Check setup.py + setup = builder.build_setup() + setup_ast = ast.parse(setup) + + setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)] + ns: dict[str, Any] = {} + exec(compile(setup_ast, filename="setup.py", mode="exec"), ns) + assert "package_dir" not in ns + assert ns["packages"] == ["my_package"] + assert ns["package_data"] == { + "": ["*"], + "my_package": ["data/*", "data/sub_data/data3.txt"], + } + + builder.build() + + sdist = ( + fixtures_dir / "default_with_excluded_data" / "dist" / "my_package-1.2.3.tar.gz" + ) + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + names = tar.getnames() + assert len(names) == len(set(names)) + assert "my_package-1.2.3/LICENSE" in names + assert "my_package-1.2.3/README.rst" in names + assert "my_package-1.2.3/my_package/__init__.py" in names + assert "my_package-1.2.3/my_package/data/data1.txt" in names + assert "my_package-1.2.3/pyproject.toml" in names + assert "my_package-1.2.3/setup.py" in names + assert "my_package-1.2.3/PKG-INFO" in names + # all last modified times should be set to a valid timestamp + for tarinfo in tar.getmembers(): + if tarinfo.name in [ + "my_package-1.2.3/setup.py", + "my_package-1.2.3/PKG-INFO", + ]: + # 
generated files have timestamp set to 0 + assert tarinfo.mtime == 0 + continue + assert tarinfo.mtime > 0 + + +def test_src_excluded_nested_data() -> None: + module_path = fixtures_dir / "exclude_nested_data_toml" + poetry = Factory().create_poetry(module_path) + + builder = SdistBuilder(poetry) + builder.build() + + sdist = module_path / "dist" / "my_package-1.2.3.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + names = tar.getnames() + assert len(names) == len(set(names)) + assert "my_package-1.2.3/LICENSE" in names + assert "my_package-1.2.3/README.rst" in names + assert "my_package-1.2.3/pyproject.toml" in names + assert "my_package-1.2.3/setup.py" in names + assert "my_package-1.2.3/PKG-INFO" in names + assert "my_package-1.2.3/my_package/__init__.py" in names + assert "my_package-1.2.3/my_package/data/sub_data/data2.txt" not in names + assert "my_package-1.2.3/my_package/data/sub_data/data3.txt" not in names + assert "my_package-1.2.3/my_package/data/data1.txt" not in names + assert "my_package-1.2.3/my_package/data/data2.txt" in names + assert "my_package-1.2.3/my_package/puplic/publicdata.txt" in names + assert "my_package-1.2.3/my_package/public/item1/itemdata1.txt" not in names + assert ( + "my_package-1.2.3/my_package/public/item1/subitem/subitemdata.txt" + not in names + ) + assert "my_package-1.2.3/my_package/public/item2/itemdata2.txt" not in names + + +def test_proper_python_requires_if_two_digits_precision_version_specified() -> None: + poetry = Factory().create_poetry(project("simple_version")) + + builder = SdistBuilder(poetry) + pkg_info = builder.build_pkg_info() + p = Parser() + parsed = p.parsestr(pkg_info.decode()) + + assert parsed["Requires-Python"] == ">=3.6,<3.7" + + +def test_proper_python_requires_if_three_digits_precision_version_specified() -> None: + poetry = Factory().create_poetry(project("single_python")) + + builder = SdistBuilder(poetry) + pkg_info = builder.build_pkg_info() + p = Parser() + parsed = p.parsestr(pkg_info.decode()) + + assert parsed["Requires-Python"] == "==2.7.15" + + +def test_includes() -> None: + poetry = Factory().create_poetry(project("with-include")) + + builder = SdistBuilder(poetry) + + builder.build() + + sdist = fixtures_dir / "with-include" / "dist" / "with_include-1.2.3.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + assert "with_include-1.2.3/extra_dir/vcs_excluded.txt" in tar.getnames() + assert "with_include-1.2.3/notes.txt" in tar.getnames() + + +def test_includes_with_inline_table() -> None: + poetry = Factory().create_poetry(project("with_include_inline_table")) + + builder = SdistBuilder(poetry) + + builder.build() + + sdist = ( + fixtures_dir + / "with_include_inline_table" + / "dist" + / "with_include-1.2.3.tar.gz" + ) + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + assert "with_include-1.2.3/both.txt" in tar.getnames() + assert "with_include-1.2.3/wheel_only.txt" not in tar.getnames() + assert "with_include-1.2.3/tests/__init__.py" in tar.getnames() + assert "with_include-1.2.3/tests/test_foo/test.py" in tar.getnames() + + +def test_excluded_subpackage() -> None: + poetry = Factory().create_poetry(project("excluded_subpackage")) + + builder = SdistBuilder(poetry) + setup = builder.build_setup() + + setup_ast = ast.parse(setup) + + setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)] + ns: dict[str, Any] = {} + exec(compile(setup_ast, filename="setup.py", mode="exec"), ns) + + assert ns["packages"] == 
["example"] + + +def test_sdist_package_pep_561_stub_only() -> None: + root = fixtures_dir / "pep_561_stub_only" + poetry = Factory().create_poetry(root) + + builder = SdistBuilder(poetry) + builder.build() + + sdist = root / "dist" / "pep_561_stubs-0.1.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + names = tar.getnames() + assert "pep_561_stubs-0.1/pkg-stubs/__init__.pyi" in names + assert "pep_561_stubs-0.1/pkg-stubs/module.pyi" in names + assert "pep_561_stubs-0.1/pkg-stubs/subpkg/__init__.pyi" in names + + +def test_sdist_disable_setup_py() -> None: + module_path = fixtures_dir / "disable_setup_py" + poetry = Factory().create_poetry(module_path) + + builder = SdistBuilder(poetry) + builder.build() + + sdist = module_path / "dist" / "my_package-1.2.3.tar.gz" + + assert sdist.exists() + + with tarfile.open(str(sdist), "r") as tar: + assert set(tar.getnames()) == { + "my_package-1.2.3/README.rst", + "my_package-1.2.3/pyproject.toml", + "my_package-1.2.3/PKG-INFO", + "my_package-1.2.3/my_package/__init__.py", + } + + +def test_sdist_mtime_zero() -> None: + poetry = Factory().create_poetry(project("module1")) + + builder = SdistBuilder(poetry) + builder.build() + + sdist = fixtures_dir / "module1" / "dist" / "module1-0.1.tar.gz" + + assert sdist.exists() + + with gzip.open(str(sdist), "rb") as gz: + gz.read(100) + assert gz.mtime == 0 + + +def test_split_source() -> None: + root = fixtures_dir / "split_source" + poetry = Factory().create_poetry(root) + + builder = SdistBuilder(poetry) + + # Check setup.py + setup = builder.build_setup() + setup_ast = ast.parse(setup) + + setup_ast.body = [n for n in setup_ast.body if isinstance(n, ast.Assign)] + ns: dict[str, Any] = {} + exec(compile(setup_ast, filename="setup.py", mode="exec"), ns) + assert "" in ns["package_dir"] and "module_b" in ns["package_dir"] diff --git a/tests/masonry/builders/test_wheel.py b/tests/masonry/builders/test_wheel.py new file mode 100644 index 0000000..2a9ee05 --- /dev/null +++ b/tests/masonry/builders/test_wheel.py @@ -0,0 +1,352 @@ +from __future__ import annotations + +import os +import shutil +import zipfile + +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Any +from typing import Iterator +from typing import TextIO + +import pytest + +from poetry.core.factory import Factory +from poetry.core.masonry.builders.wheel import WheelBuilder +from tests.masonry.builders.test_sdist import project + + +if TYPE_CHECKING: + from _pytest.monkeypatch import MonkeyPatch + from pytest_mock import MockerFixture + +fixtures_dir = Path(__file__).parent / "fixtures" + + +@pytest.fixture(autouse=True) +def setup() -> Iterator[None]: + clear_samples_dist() + + yield + + clear_samples_dist() + + +def clear_samples_dist() -> None: + for dist in fixtures_dir.glob("**/dist"): + if dist.is_dir(): + shutil.rmtree(str(dist)) + + +def test_wheel_module() -> None: + module_path = fixtures_dir / "module1" + WheelBuilder.make(Factory().create_poetry(module_path)) + + whl = module_path / "dist" / "module1-0.1-py2.py3-none-any.whl" + + assert whl.exists() + + with zipfile.ZipFile(str(whl)) as z: + assert "module1.py" in z.namelist() + + +def test_wheel_package() -> None: + module_path = fixtures_dir / "complete" + WheelBuilder.make(Factory().create_poetry(module_path)) + + whl = module_path / "dist" / "my_package-1.2.3-py3-none-any.whl" + + assert whl.exists() + + with zipfile.ZipFile(str(whl)) as z: + assert "my_package/sub_pkg1/__init__.py" in z.namelist() + + +def 
test_wheel_prerelease() -> None: + module_path = fixtures_dir / "prerelease" + WheelBuilder.make(Factory().create_poetry(module_path)) + + whl = module_path / "dist" / "prerelease-0.1b1-py2.py3-none-any.whl" + + assert whl.exists() + + +def test_wheel_epoch() -> None: + module_path = fixtures_dir / "epoch" + WheelBuilder.make(Factory().create_poetry(module_path)) + + whl = module_path / "dist" / "epoch-1!2.0-py2.py3-none-any.whl" + + assert whl.exists() + + with zipfile.ZipFile(str(whl)) as z: + assert "epoch-1!2.0.dist-info/METADATA" in z.namelist() + + +def test_wheel_excluded_data() -> None: + module_path = fixtures_dir / "default_with_excluded_data_toml" + WheelBuilder.make(Factory().create_poetry(module_path)) + + whl = module_path / "dist" / "my_package-1.2.3-py3-none-any.whl" + + assert whl.exists() + + with zipfile.ZipFile(str(whl)) as z: + assert "my_package/__init__.py" in z.namelist() + assert "my_package/data/sub_data/data2.txt" in z.namelist() + assert "my_package/data/sub_data/data3.txt" in z.namelist() + assert "my_package/data/data1.txt" not in z.namelist() + + +def test_wheel_excluded_nested_data() -> None: + module_path = fixtures_dir / "exclude_nested_data_toml" + poetry = Factory().create_poetry(module_path) + WheelBuilder.make(poetry) + + whl = module_path / "dist" / "my_package-1.2.3-py3-none-any.whl" + + assert whl.exists() + + with zipfile.ZipFile(str(whl)) as z: + assert "my_package/__init__.py" in z.namelist() + assert "my_package/data/sub_data/data2.txt" not in z.namelist() + assert "my_package/data/sub_data/data3.txt" not in z.namelist() + assert "my_package/data/data1.txt" not in z.namelist() + assert "my_package/data/data2.txt" in z.namelist() + assert "my_package/puplic/publicdata.txt" in z.namelist() + assert "my_package/public/item1/itemdata1.txt" not in z.namelist() + assert "my_package/public/item1/subitem/subitemdata.txt" not in z.namelist() + assert "my_package/public/item2/itemdata2.txt" not in z.namelist() + + +def test_include_excluded_code() -> None: + module_path = fixtures_dir / "include_excluded_code" + poetry = Factory().create_poetry(module_path) + wb = WheelBuilder(poetry) + wb.build() + whl = module_path / "dist" / wb.wheel_filename + assert whl.exists() + + with zipfile.ZipFile(str(whl)) as z: + assert "my_package/__init__.py" in z.namelist() + assert "my_package/generated.py" in z.namelist() + assert "lib/my_package/generated.py" not in z.namelist() + + +def test_wheel_localversionlabel() -> None: + module_path = fixtures_dir / "localversionlabel" + project = Factory().create_poetry(module_path) + WheelBuilder.make(project) + local_version_string = "localversionlabel-0.1b1+gitbranch.buildno.1" + whl = module_path / "dist" / (local_version_string + "-py2.py3-none-any.whl") + + assert whl.exists() + + with zipfile.ZipFile(str(whl)) as z: + assert local_version_string + ".dist-info/METADATA" in z.namelist() + + +def test_wheel_package_src() -> None: + module_path = fixtures_dir / "source_package" + WheelBuilder.make(Factory().create_poetry(module_path)) + + whl = module_path / "dist" / "package_src-0.1-py2.py3-none-any.whl" + + assert whl.exists() + + with zipfile.ZipFile(str(whl)) as z: + assert "package_src/__init__.py" in z.namelist() + assert "package_src/module.py" in z.namelist() + + +def test_wheel_module_src() -> None: + module_path = fixtures_dir / "source_file" + WheelBuilder.make(Factory().create_poetry(module_path)) + + whl = module_path / "dist" / "module_src-0.1-py2.py3-none-any.whl" + + assert whl.exists() + + with 
zipfile.ZipFile(str(whl)) as z: + assert "module_src.py" in z.namelist() + + +def test_dist_info_file_permissions() -> None: + module_path = fixtures_dir / "complete" + WheelBuilder.make(Factory().create_poetry(module_path)) + + whl = module_path / "dist" / "my_package-1.2.3-py3-none-any.whl" + + with zipfile.ZipFile(str(whl)) as z: + assert ( + z.getinfo("my_package-1.2.3.dist-info/WHEEL").external_attr & 0x1FF0000 + == 0o644 << 16 + ) + assert ( + z.getinfo("my_package-1.2.3.dist-info/METADATA").external_attr & 0x1FF0000 + == 0o644 << 16 + ) + assert ( + z.getinfo("my_package-1.2.3.dist-info/RECORD").external_attr & 0x1FF0000 + == 0o644 << 16 + ) + assert ( + z.getinfo("my_package-1.2.3.dist-info/entry_points.txt").external_attr + & 0x1FF0000 + == 0o644 << 16 + ) + + +def test_wheel_includes_inline_table() -> None: + module_path = fixtures_dir / "with_include_inline_table" + WheelBuilder.make(Factory().create_poetry(module_path)) + + whl = module_path / "dist" / "with_include-1.2.3-py3-none-any.whl" + + assert whl.exists() + + with zipfile.ZipFile(str(whl)) as z: + assert "both.txt" in z.namelist() + assert "wheel_only.txt" in z.namelist() + assert "notes.txt" not in z.namelist() + + +@pytest.mark.parametrize( + "package", + ["pep_561_stub_only", "pep_561_stub_only_partial", "pep_561_stub_only_src"], +) +def test_wheel_package_pep_561_stub_only(package: str) -> None: + root = fixtures_dir / package + WheelBuilder.make(Factory().create_poetry(root)) + + whl = root / "dist" / "pep_561_stubs-0.1-py3-none-any.whl" + + assert whl.exists() + + with zipfile.ZipFile(str(whl)) as z: + assert "pkg-stubs/__init__.pyi" in z.namelist() + assert "pkg-stubs/module.pyi" in z.namelist() + assert "pkg-stubs/subpkg/__init__.pyi" in z.namelist() + + +def test_wheel_package_pep_561_stub_only_partial_namespace() -> None: + root = fixtures_dir / "pep_561_stub_only_partial_namespace" + WheelBuilder.make(Factory().create_poetry(root)) + + whl = root / "dist" / "pep_561_stubs-0.1-py3-none-any.whl" + + assert whl.exists() + + with zipfile.ZipFile(str(whl)) as z: + assert "pkg-stubs/module.pyi" in z.namelist() + assert "pkg-stubs/subpkg/__init__.pyi" in z.namelist() + assert "pkg-stubs/subpkg/py.typed" in z.namelist() + + +def test_wheel_package_pep_561_stub_only_includes_typed_marker() -> None: + root = fixtures_dir / "pep_561_stub_only_partial" + WheelBuilder.make(Factory().create_poetry(root)) + + whl = root / "dist" / "pep_561_stubs-0.1-py3-none-any.whl" + + assert whl.exists() + + with zipfile.ZipFile(str(whl)) as z: + assert "pkg-stubs/py.typed" in z.namelist() + + +def test_wheel_includes_licenses_in_correct_paths() -> None: + root = fixtures_dir / "licenses_and_copying" + WheelBuilder.make(Factory().create_poetry(root)) + + whl = root / "dist" / "my_package-1.2.3-py3-none-any.whl" + + assert whl.exists() + with zipfile.ZipFile(str(whl)) as z: + assert "my_package-1.2.3.dist-info/COPYING" in z.namelist() + assert "my_package-1.2.3.dist-info/COPYING.txt" in z.namelist() + assert "my_package-1.2.3.dist-info/LICENSE" in z.namelist() + assert "my_package-1.2.3.dist-info/LICENSE.md" in z.namelist() + assert "my_package-1.2.3.dist-info/LICENSES/CUSTOM-LICENSE" in z.namelist() + assert "my_package-1.2.3.dist-info/LICENSES/BSD-3.md" in z.namelist() + assert "my_package-1.2.3.dist-info/LICENSES/MIT.txt" in z.namelist() + + +def test_wheel_with_file_with_comma() -> None: + root = fixtures_dir / "comma_file" + WheelBuilder.make(Factory().create_poetry(root)) + + whl = root / "dist" / "comma_file-1.2.3-py3-none-any.whl" 
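+    # RECORD is CSV-formatted, so a path containing a comma should be written
+    # as a quoted field; the check below reads RECORD and expects the quoted form.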
+ + assert whl.exists() + + with zipfile.ZipFile(str(whl)) as z: + records = z.read("comma_file-1.2.3.dist-info/RECORD") + assert '\n"comma_file/a,b.py"' in records.decode() + + +def test_default_src_with_excluded_data(mocker: MockerFixture) -> None: + class MockGit: + def get_ignored_files(self, folder: Path | None = None) -> list[str]: + # Patch git module to return specific excluded files + return [ + ( + ( + Path(__file__).parent + / "fixtures" + / "default_src_with_excluded_data" + / "src" + / "my_package" + / "data" + / "sub_data" + / "data2.txt" + ) + .relative_to(project("default_src_with_excluded_data")) + .as_posix() + ) + ] + + p = mocker.patch("poetry.core.vcs.get_vcs") + p.return_value = MockGit() + poetry = Factory().create_poetry(project("default_src_with_excluded_data")) + + builder = WheelBuilder(poetry) + builder.build() + + whl = ( + fixtures_dir + / "default_src_with_excluded_data" + / "dist" + / "my_package-1.2.3-py3-none-any.whl" + ) + + assert whl.exists() + + with zipfile.ZipFile(str(whl)) as z: + names = z.namelist() + assert "my_package/__init__.py" in names + assert "my_package/data/data1.txt" in names + assert "my_package/data/sub_data/data2.txt" not in names + assert "my_package/data/sub_data/data3.txt" in names + + +def test_wheel_file_is_closed(monkeypatch: MonkeyPatch) -> None: + """Confirm that wheel zip files are explicitly closed.""" + + # Using a list is a hack for Python 2.7 compatibility. + fd_file: list[TextIO | None] = [None] + + real_fdopen = os.fdopen + + def capturing_fdopen(*args: Any, **kwargs: Any) -> TextIO | None: + fd_file[0] = real_fdopen(*args, **kwargs) + return fd_file[0] + + monkeypatch.setattr(os, "fdopen", capturing_fdopen) + + module_path = fixtures_dir / "module1" + WheelBuilder.make(Factory().create_poetry(module_path)) + + assert fd_file[0] is not None + assert fd_file[0].closed diff --git a/tests/masonry/test_api.py b/tests/masonry/test_api.py new file mode 100644 index 0000000..dfc1699 --- /dev/null +++ b/tests/masonry/test_api.py @@ -0,0 +1,256 @@ +from __future__ import annotations + +import os +import platform +import sys +import zipfile + +from contextlib import contextmanager +from pathlib import Path +from typing import Iterator + +import pytest + +from poetry.core import __version__ +from poetry.core.masonry import api +from poetry.core.utils.helpers import temporary_directory +from tests.testutils import validate_sdist_contents +from tests.testutils import validate_wheel_contents + + +@contextmanager +def cwd(directory: str | Path) -> Iterator[None]: + prev = os.getcwd() + os.chdir(str(directory)) + try: + yield + finally: + os.chdir(prev) + + +fixtures = os.path.join(os.path.dirname(__file__), "builders", "fixtures") + + +def test_get_requires_for_build_wheel() -> None: + expected: list[str] = [] + with cwd(os.path.join(fixtures, "complete")): + assert api.get_requires_for_build_wheel() == expected + + +def test_get_requires_for_build_sdist() -> None: + expected: list[str] = [] + with cwd(os.path.join(fixtures, "complete")): + assert api.get_requires_for_build_sdist() == expected + + +def test_build_wheel() -> None: + with temporary_directory() as tmp_dir, cwd(os.path.join(fixtures, "complete")): + filename = api.build_wheel(tmp_dir) + validate_wheel_contents( + name="my_package", + version="1.2.3", + path=str(os.path.join(tmp_dir, filename)), + files=["entry_points.txt"], + ) + + +def test_build_wheel_with_include() -> None: + with temporary_directory() as tmp_dir, cwd(os.path.join(fixtures, "with-include")): + 
filename = api.build_wheel(tmp_dir) + validate_wheel_contents( + name="with_include", + version="1.2.3", + path=str(os.path.join(tmp_dir, filename)), + files=["entry_points.txt"], + ) + + +def test_build_wheel_with_bad_path_dev_dep_succeeds() -> None: + with temporary_directory() as tmp_dir, cwd( + os.path.join(fixtures, "with_bad_path_dev_dep") + ): + api.build_wheel(tmp_dir) + + +def test_build_wheel_with_bad_path_dep_fails() -> None: + with pytest.raises(ValueError) as err, temporary_directory() as tmp_dir, cwd( + os.path.join(fixtures, "with_bad_path_dep") + ): + api.build_wheel(tmp_dir) + assert "does not exist" in str(err.value) + + +@pytest.mark.skipif( + sys.platform == "win32" + and sys.version_info <= (3, 6) + or platform.python_implementation().lower() == "pypy", + reason="Disable test on Windows for Python <=3.6 and for PyPy", +) +def test_build_wheel_extended() -> None: + with temporary_directory() as tmp_dir, cwd(os.path.join(fixtures, "extended")): + filename = api.build_wheel(tmp_dir) + whl = Path(tmp_dir) / filename + assert whl.exists() + validate_wheel_contents(name="extended", version="0.1", path=whl.as_posix()) + + +def test_build_sdist() -> None: + with temporary_directory() as tmp_dir, cwd(os.path.join(fixtures, "complete")): + filename = api.build_sdist(tmp_dir) + validate_sdist_contents( + name="my-package", + version="1.2.3", + path=str(os.path.join(tmp_dir, filename)), + files=["LICENSE"], + ) + + +def test_build_sdist_with_include() -> None: + with temporary_directory() as tmp_dir, cwd(os.path.join(fixtures, "with-include")): + filename = api.build_sdist(tmp_dir) + validate_sdist_contents( + name="with-include", + version="1.2.3", + path=str(os.path.join(tmp_dir, filename)), + files=["LICENSE"], + ) + + +def test_build_sdist_with_bad_path_dev_dep_succeeds() -> None: + with temporary_directory() as tmp_dir, cwd( + os.path.join(fixtures, "with_bad_path_dev_dep") + ): + api.build_sdist(tmp_dir) + + +def test_build_sdist_with_bad_path_dep_fails() -> None: + with pytest.raises(ValueError) as err, temporary_directory() as tmp_dir, cwd( + os.path.join(fixtures, "with_bad_path_dep") + ): + api.build_sdist(tmp_dir) + assert "does not exist" in str(err.value) + + +def test_prepare_metadata_for_build_wheel() -> None: + entry_points = """\ +[console_scripts] +extra-script=my_package.extra:main[time] +my-2nd-script=my_package:main2 +my-script=my_package:main + +""" + wheel_data = f"""\ +Wheel-Version: 1.0 +Generator: poetry-core {__version__} +Root-Is-Purelib: true +Tag: py3-none-any +""" + metadata = """\ +Metadata-Version: 2.1 +Name: my-package +Version: 1.2.3 +Summary: Some description. 
+Home-page: https://python-poetry.org/ +License: MIT +Keywords: packaging,dependency,poetry +Author: Sébastien Eustace +Author-email: sebastien@eustace.io +Maintainer: People Everywhere +Maintainer-email: people@everywhere.com +Requires-Python: >=3.6,<4.0 +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Topic :: Software Development :: Build Tools +Classifier: Topic :: Software Development :: Libraries :: Python Modules +Provides-Extra: time +Requires-Dist: cachy[msgpack] (>=0.2.0,<0.3.0) +Requires-Dist: cleo (>=0.6,<0.7) +Requires-Dist: pendulum (>=1.4,<2.0) ; (python_version ~= "2.7" and sys_platform == "win32" or python_version in "3.4 3.5") and (extra == "time") +Project-URL: Documentation, https://python-poetry.org/docs +Project-URL: Issue Tracker, https://github.com/python-poetry/poetry/issues +Project-URL: Repository, https://github.com/python-poetry/poetry +Description-Content-Type: text/x-rst + +My Package +========== + +""" + with temporary_directory() as tmp_dir, cwd(os.path.join(fixtures, "complete")): + dirname = api.prepare_metadata_for_build_wheel(tmp_dir) + + assert dirname == "my_package-1.2.3.dist-info" + + dist_info = Path(tmp_dir, dirname) + + assert (dist_info / "entry_points.txt").exists() + assert (dist_info / "WHEEL").exists() + assert (dist_info / "METADATA").exists() + + with (dist_info / "entry_points.txt").open(encoding="utf-8") as f: + assert entry_points == f.read() + + with (dist_info / "WHEEL").open(encoding="utf-8") as f: + assert wheel_data == f.read() + + with (dist_info / "METADATA").open(encoding="utf-8") as f: + assert metadata == f.read() + + +def test_prepare_metadata_for_build_wheel_with_bad_path_dev_dep_succeeds() -> None: + with temporary_directory() as tmp_dir, cwd( + os.path.join(fixtures, "with_bad_path_dev_dep") + ): + api.prepare_metadata_for_build_wheel(tmp_dir) + + +def test_prepare_metadata_for_build_wheel_with_bad_path_dep_succeeds() -> None: + with pytest.raises(ValueError) as err, temporary_directory() as tmp_dir, cwd( + os.path.join(fixtures, "with_bad_path_dep") + ): + api.prepare_metadata_for_build_wheel(tmp_dir) + assert "does not exist" in str(err.value) + + +def test_build_editable_wheel() -> None: + pkg_dir = Path(fixtures) / "complete" + + with temporary_directory() as tmp_dir, cwd(pkg_dir): + filename = api.build_editable(tmp_dir) + wheel_pth = Path(tmp_dir) / filename + + validate_wheel_contents( + name="my_package", + version="1.2.3", + path=str(wheel_pth), + ) + + with zipfile.ZipFile(wheel_pth) as z: + namelist = z.namelist() + + assert "my_package.pth" in namelist + assert pkg_dir.as_posix() == z.read("my_package.pth").decode().strip() + + +def test_build_wheel_with_metadata_directory() -> None: + with temporary_directory() as metadata_tmp_dir, cwd( + os.path.join(fixtures, "complete") + ): + metadata_directory = api.prepare_metadata_for_build_wheel(metadata_tmp_dir) + + with temporary_directory() as wheel_tmp_dir: + dist_info_path = Path(metadata_tmp_dir) / metadata_directory + filename = api.build_wheel( + wheel_tmp_dir, metadata_directory=str(dist_info_path) + ) + validate_wheel_contents( + name="my_package", + version="1.2.3", + 
path=str(os.path.join(wheel_tmp_dir, filename)), + files=["entry_points.txt"], + ) diff --git a/tests/masonry/utils/__init__.py b/tests/masonry/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/utils/fixtures/pep_561_stub_only/bad/__init__.pyi b/tests/masonry/utils/fixtures/pep_561_stub_only/bad/__init__.pyi new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/utils/fixtures/pep_561_stub_only/bad/module.pyi b/tests/masonry/utils/fixtures/pep_561_stub_only/bad/module.pyi new file mode 100644 index 0000000..d79e6e3 --- /dev/null +++ b/tests/masonry/utils/fixtures/pep_561_stub_only/bad/module.pyi @@ -0,0 +1,4 @@ +"""Example module""" +from typing import Tuple + +version_info = Tuple[int, int, int] diff --git a/tests/masonry/utils/fixtures/pep_561_stub_only/good-stubs/__init__.pyi b/tests/masonry/utils/fixtures/pep_561_stub_only/good-stubs/__init__.pyi new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/utils/fixtures/pep_561_stub_only/good-stubs/module.pyi b/tests/masonry/utils/fixtures/pep_561_stub_only/good-stubs/module.pyi new file mode 100644 index 0000000..d79e6e3 --- /dev/null +++ b/tests/masonry/utils/fixtures/pep_561_stub_only/good-stubs/module.pyi @@ -0,0 +1,4 @@ +"""Example module""" +from typing import Tuple + +version_info = Tuple[int, int, int] diff --git a/tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/good-stubs/module.pyi b/tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/good-stubs/module.pyi new file mode 100644 index 0000000..d79e6e3 --- /dev/null +++ b/tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/good-stubs/module.pyi @@ -0,0 +1,4 @@ +"""Example module""" +from typing import Tuple + +version_info = Tuple[int, int, int] diff --git a/tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/good-stubs/subpkg/__init__.pyi b/tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/good-stubs/subpkg/__init__.pyi new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/good-stubs/subpkg/py.typed b/tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/good-stubs/subpkg/py.typed new file mode 100644 index 0000000..b648ac9 --- /dev/null +++ b/tests/masonry/utils/fixtures/pep_561_stub_only_partial_namespace/good-stubs/subpkg/py.typed @@ -0,0 +1 @@ +partial diff --git a/tests/masonry/utils/fixtures/with_includes/__init__.py b/tests/masonry/utils/fixtures/with_includes/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/utils/fixtures/with_includes/bar/baz.py b/tests/masonry/utils/fixtures/with_includes/bar/baz.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/utils/fixtures/with_includes/extra_package/some_dir/foo.py b/tests/masonry/utils/fixtures/with_includes/extra_package/some_dir/foo.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/utils/fixtures/with_includes/extra_package/some_dir/quux.py b/tests/masonry/utils/fixtures/with_includes/extra_package/some_dir/quux.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/utils/fixtures/with_includes/not_a_python_pkg/baz.txt b/tests/masonry/utils/fixtures/with_includes/not_a_python_pkg/baz.txt new file mode 100644 index 0000000..e69de29 diff --git a/tests/masonry/utils/test_helpers.py b/tests/masonry/utils/test_helpers.py new file mode 100644 index 0000000..b03ff39 --- /dev/null +++ b/tests/masonry/utils/test_helpers.py 
@@ -0,0 +1,23 @@ +from __future__ import annotations + +import warnings + +import pytest + +from poetry.core.masonry.utils.helpers import escape_name + + +@pytest.mark.parametrize( + "name,expected", + [ + ("foo", "foo"), + ("foo-bar", "foo_bar"), + ("FOO-bAr", "foo_bar"), + ("foo.bar", "foo_bar"), + ("foo123-ba---.r", "foo123_ba_r"), + ], +) +def test_escape_name(name: str, expected: str) -> None: + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + assert escape_name(name) == expected diff --git a/tests/masonry/utils/test_package_include.py b/tests/masonry/utils/test_package_include.py new file mode 100644 index 0000000..913a405 --- /dev/null +++ b/tests/masonry/utils/test_package_include.py @@ -0,0 +1,85 @@ +from __future__ import annotations + +from pathlib import Path + +import pytest + +from poetry.core.masonry.utils.package_include import PackageInclude + + +fixtures_dir = Path(__file__).parent / "fixtures" +with_includes = fixtures_dir / "with_includes" + + +def test_package_include_with_multiple_dirs() -> None: + pkg_include = PackageInclude(base=fixtures_dir, include="with_includes") + assert pkg_include.elements == [ + with_includes / "__init__.py", + with_includes / "bar", + with_includes / "bar/baz.py", + with_includes / "extra_package", + with_includes / "extra_package/some_dir", + with_includes / "extra_package/some_dir/foo.py", + with_includes / "extra_package/some_dir/quux.py", + with_includes / "not_a_python_pkg", + with_includes / "not_a_python_pkg/baz.txt", + ] + + +def test_package_include_with_simple_dir() -> None: + pkg_include = PackageInclude(base=with_includes, include="bar") + assert pkg_include.elements == [with_includes / "bar/baz.py"] + + +def test_package_include_with_nested_dir() -> None: + pkg_include = PackageInclude(base=with_includes, include="extra_package/**/*.py") + assert pkg_include.elements == [ + with_includes / "extra_package/some_dir/foo.py", + with_includes / "extra_package/some_dir/quux.py", + ] + + +def test_package_include_with_no_python_files_in_dir() -> None: + with pytest.raises(ValueError) as e: + PackageInclude(base=with_includes, include="not_a_python_pkg") + + assert str(e.value) == "not_a_python_pkg is not a package." 
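+
+
+# A minimal usage sketch (the test name is illustrative), assuming
+# PackageInclude.elements yields Path objects for every matched file, as the
+# assertions in this module suggest; it shows the constructor-and-elements
+# round trip the surrounding tests rely on.
+def test_package_include_elements_exist() -> None:
+    pkg_include = PackageInclude(base=with_includes, include="bar")
+    # Every element reported for an include should point at an existing path.
+    assert all(path.exists() for path in pkg_include.elements)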
+ + +def test_package_include_with_non_existent_directory() -> None: + with pytest.raises(ValueError) as e: + PackageInclude(base=with_includes, include="not_a_dir") + + err_str = str(with_includes / "not_a_dir") + " does not contain any element" + + assert str(e.value) == err_str + + +def test_pep_561_stub_only_package_good_name_suffix() -> None: + pkg_include = PackageInclude( + base=fixtures_dir / "pep_561_stub_only", include="good-stubs" + ) + assert pkg_include.elements == [ + fixtures_dir / "pep_561_stub_only/good-stubs/__init__.pyi", + fixtures_dir / "pep_561_stub_only/good-stubs/module.pyi", + ] + + +def test_pep_561_stub_only_partial_namespace_package_good_name_suffix() -> None: + pkg_include = PackageInclude( + base=fixtures_dir / "pep_561_stub_only_partial_namespace", include="good-stubs" + ) + assert pkg_include.elements == [ + fixtures_dir / "pep_561_stub_only_partial_namespace/good-stubs/module.pyi", + fixtures_dir / "pep_561_stub_only_partial_namespace/good-stubs/subpkg/", + fixtures_dir + / "pep_561_stub_only_partial_namespace/good-stubs/subpkg/__init__.pyi", + fixtures_dir / "pep_561_stub_only_partial_namespace/good-stubs/subpkg/py.typed", + ] + + +def test_pep_561_stub_only_package_bad_name_suffix() -> None: + with pytest.raises(ValueError) as e: + PackageInclude(base=fixtures_dir / "pep_561_stub_only", include="bad") + + assert str(e.value) == "bad is not a package." diff --git a/tests/packages/__init__.py b/tests/packages/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/packages/test_dependency.py b/tests/packages/test_dependency.py new file mode 100644 index 0000000..e47bcc1 --- /dev/null +++ b/tests/packages/test_dependency.py @@ -0,0 +1,364 @@ +from __future__ import annotations + +import pytest + +from packaging.utils import canonicalize_name + +from poetry.core.constraints.version.exceptions import ParseConstraintError +from poetry.core.packages.dependency import Dependency +from poetry.core.version.markers import parse_marker + + +@pytest.mark.parametrize( + "constraint,result", + [ + ("^1.0", False), + ("^1.0.dev0", True), + ("^1.0.0", False), + ("^1.0.0.dev0", True), + ("^1.0.0.alpha0", True), + ("^1.0.0.alpha0+local", True), + ("^1.0.0.rc0+local", True), + ("^1.0.0-1", False), + ], +) +def test_allows_prerelease(constraint: str, result: bool) -> None: + assert Dependency("A", constraint).allows_prereleases() == result + + +def test_to_pep_508() -> None: + dependency = Dependency("Django", "^1.23") + + result = dependency.to_pep_508() + assert result == "Django (>=1.23,<2.0)" + + dependency = Dependency("Django", "^1.23") + dependency.python_versions = "~2.7 || ^3.6" + + result = dependency.to_pep_508() + assert ( + result + == "Django (>=1.23,<2.0) ; " + 'python_version >= "2.7" and python_version < "2.8" ' + 'or python_version >= "3.6" and python_version < "4.0"' + ) + + +def test_to_pep_508_wilcard() -> None: + dependency = Dependency("Django", "*") + + result = dependency.to_pep_508() + assert result == "Django" + + +def test_to_pep_508_in_extras() -> None: + dependency = Dependency("Django", "^1.23") + dependency.in_extras.append(canonicalize_name("foo")) + + result = dependency.to_pep_508() + assert result == 'Django (>=1.23,<2.0) ; extra == "foo"' + + result = dependency.to_pep_508(with_extras=False) + assert result == "Django (>=1.23,<2.0)" + + dependency.in_extras.append(canonicalize_name("bar")) + + result = dependency.to_pep_508() + assert result == 'Django (>=1.23,<2.0) ; extra == "foo" or extra == "bar"' + + 
dependency.python_versions = "~2.7 || ^3.6" + + result = dependency.to_pep_508() + assert ( + result + == "Django (>=1.23,<2.0) ; " + "(" + 'python_version >= "2.7" and python_version < "2.8" ' + 'or python_version >= "3.6" and python_version < "4.0"' + ") " + 'and (extra == "foo" or extra == "bar")' + ) + + result = dependency.to_pep_508(with_extras=False) + assert ( + result + == "Django (>=1.23,<2.0) ; " + 'python_version >= "2.7" and python_version < "2.8" ' + 'or python_version >= "3.6" and python_version < "4.0"' + ) + + +def test_to_pep_508_in_extras_parsed() -> None: + dependency = Dependency.create_from_pep_508( + 'foo[baz,bar] (>=1.23,<2.0) ; extra == "baz"' + ) + + result = dependency.to_pep_508() + assert result == 'foo[bar,baz] (>=1.23,<2.0) ; extra == "baz"' + + result = dependency.to_pep_508(with_extras=False) + assert result == "foo[bar,baz] (>=1.23,<2.0)" + + +@pytest.mark.parametrize( + ("exclusion", "expected"), + [ + ("!=1.2.3", "!=1.2.3"), + ("!=1.2.*", "!=1.2.*"), + ("<2.0 || >=2.1", "!=2.0.*"), + ], +) +def test_to_pep_508_with_excluded_versions(exclusion: str, expected: str) -> None: + dependency = Dependency("foo", exclusion) + + assert dependency.to_pep_508() == f"foo ({expected})" + + +@pytest.mark.parametrize( + "python_versions, marker", + [ + (">=3.5,<3.5.4", 'python_version >= "3.5" and python_full_version < "3.5.4"'), + (">=3.5.4,<3.6", 'python_full_version >= "3.5.4" and python_version < "3.6"'), + ("<3.5.4", 'python_full_version < "3.5.4"'), + (">=3.5.4", 'python_full_version >= "3.5.4"'), + ("== 3.5.4", 'python_full_version == "3.5.4"'), + ], +) +def test_to_pep_508_with_patch_python_version( + python_versions: str, marker: str +) -> None: + dependency = Dependency("Django", "^1.23") + dependency.python_versions = python_versions + + expected = f"Django (>=1.23,<2.0) ; {marker}" + + assert dependency.to_pep_508() == expected + assert str(dependency.marker) == marker + + +def test_to_pep_508_tilde() -> None: + dependency = Dependency("foo", "~1.2.3") + + assert dependency.to_pep_508() == "foo (>=1.2.3,<1.3.0)" + + dependency = Dependency("foo", "~1.2") + + assert dependency.to_pep_508() == "foo (>=1.2,<1.3)" + + dependency = Dependency("foo", "~0.2.3") + + assert dependency.to_pep_508() == "foo (>=0.2.3,<0.3.0)" + + dependency = Dependency("foo", "~0.2") + + assert dependency.to_pep_508() == "foo (>=0.2,<0.3)" + + +def test_to_pep_508_caret() -> None: + dependency = Dependency("foo", "^1.2.3") + + assert dependency.to_pep_508() == "foo (>=1.2.3,<2.0.0)" + + dependency = Dependency("foo", "^1.2") + + assert dependency.to_pep_508() == "foo (>=1.2,<2.0)" + + dependency = Dependency("foo", "^0.2.3") + + assert dependency.to_pep_508() == "foo (>=0.2.3,<0.3.0)" + + dependency = Dependency("foo", "^0.2") + + assert dependency.to_pep_508() == "foo (>=0.2,<0.3)" + + +def test_to_pep_508_combination() -> None: + dependency = Dependency("foo", "^1.2,!=1.3.5") + + assert dependency.to_pep_508() == "foo (>=1.2,<2.0,!=1.3.5)" + + dependency = Dependency("foo", "~1.2,!=1.2.5") + + assert dependency.to_pep_508() == "foo (>=1.2,<1.3,!=1.2.5)" + + +def test_complete_name() -> None: + assert Dependency("foo", ">=1.2.3").complete_name == "foo" + assert ( + Dependency("foo", ">=1.2.3", extras=["baz", "bar"]).complete_name + == "foo[bar,baz]" + ) + + +@pytest.mark.parametrize( + "name,constraint,extras,expected", + [ + ("A", ">2.7,<3.0", None, "A (>2.7,<3.0)"), + ("A", ">2.7,<3.0", ["x"], "A[x] (>2.7,<3.0)"), + ("A", ">=1.6.5,<1.8.0 || >1.8.0,<3.1.0", None, "A 
(>=1.6.5,!=1.8.0,<3.1.0)"), + ( + "A", + ">=1.6.5,<1.8.0 || >1.8.0,<3.1.0", + ["x"], + "A[x] (>=1.6.5,!=1.8.0,<3.1.0)", + ), + # test single version range exclusions + ("A", ">=1.8,!=2.0.*", None, "A (>=1.8,!=2.0.*)"), + ("A", "!=0.0.*", None, "A (!=0.0.*)"), + ("A", "!=0.1.*", None, "A (!=0.1.*)"), + ("A", "!=0.*", None, "A (>=1.0.0)"), + ("A", ">=1.8,!=2.*", None, "A (>=1.8,!=2.*)"), + ("A", ">=1.8,!=2.*.*", None, "A (>=1.8,!=2.*)"), + ("A", ">=1.8,<2.0 || >=2.1.0", None, "A (>=1.8,!=2.0.*)"), + ("A", ">=1.8,<2.0.0 || >=3.0.0", None, "A (>=1.8,!=2.*)"), + ("A", ">=1.8,<2.0 || >=3", None, "A (>=1.8,!=2.*)"), + ("A", ">=1.8,<2 || >=2.1.0", None, "A (>=1.8,!=2.0.*)"), + ("A", ">=1.8,<2 || >=2.1", None, "A (>=1.8,!=2.0.*)"), + ("A", ">=1.8,!=2.0.*,!=3.0.*", None, "A (>=1.8,!=2.0.*,!=3.0.*)"), + ("A", ">=1.8.0.0,<2.0.0.0 || >=2.0.1.0", None, "A (>=1.8.0.0,!=2.0.0.*)"), + ("A", ">=1.8.0.0,<2 || >=2.0.1.0", None, "A (>=1.8.0.0,!=2.0.0.*)"), + # we verify that the range exclusion logic is not too eager + ("A", ">=1.8,<2.0 || >=2.2.0", None, "A (>=1.8,<2.0 || >=2.2.0)"), + ("A", ">=1.8,<2.0 || >=2.1.5", None, "A (>=1.8,<2.0 || >=2.1.5)"), + ("A", ">=1.8.0.0,<2 || >=2.0.1.5", None, "A (>=1.8.0.0,<2 || >=2.0.1.5)"), + # non-semver version test is ignored due to existing bug in wildcard + # constraint parsing that ignores non-semver versions + # TODO: re-enable for verification once fixed + # ("A", ">=1.8.0.0,!=2.0.0.*", None, "A (>=1.8.0.0,!=2.0.0.*)"), # noqa: E800 + ], +) +def test_dependency_string_representation( + name: str, constraint: str, extras: list[str] | None, expected: str +) -> None: + dependency = Dependency(name=name, constraint=constraint, extras=extras) + assert str(dependency) == expected + + +def test_set_constraint_sets_pretty_constraint() -> None: + dependency = Dependency("A", "^1.0") + assert dependency.pretty_constraint == "^1.0" + dependency.constraint = "^2.0" # type: ignore[assignment] + assert dependency.pretty_constraint == "^2.0" + + +def test_set_bogus_constraint_raises_exception() -> None: + dependency = Dependency("A", "^1.0") + with pytest.raises(ParseConstraintError): + dependency.constraint = "^=4.5" # type: ignore[assignment] + + +def test_with_constraint() -> None: + dependency = Dependency( + "foo", + "^1.2.3", + optional=True, + groups=["dev"], + allows_prereleases=True, + extras=["bar", "baz"], + ) + dependency.marker = parse_marker( + 'python_version >= "3.6" and python_version < "4.0"' + ) + dependency.transitive_marker = parse_marker( + 'python_version >= "3.7" and python_version < "4.0"' + ) + dependency.python_versions = "^3.6" + dependency.transitive_python_versions = "^3.7" + + new = dependency.with_constraint("^1.2.6") + + assert new.name == dependency.name + assert str(new.constraint) == ">=1.2.6,<2.0.0" + assert new.is_optional() + assert new.groups == frozenset(["dev"]) + assert new.allows_prereleases() + assert set(new.extras) == {"bar", "baz"} + assert new.marker == dependency.marker + assert new.transitive_marker == dependency.transitive_marker + assert new.python_constraint == dependency.python_constraint + assert new.transitive_python_constraint == dependency.transitive_python_constraint + + +@pytest.mark.parametrize( + "marker, expected", + [ + ('python_version >= "3.6" and python_version < "4.0"', ">=3.6,<4.0"), + ('sys_platform == "linux"', "*"), + ('python_version >= "3.9" or sys_platform == "linux"', "*"), + ('python_version >= "3.9" and sys_platform == "linux"', ">=3.9"), + ], +) +def test_marker_properly_sets_python_constraint(marker: 
str, expected: str) -> None: + dependency = Dependency("foo", "^1.2.3") + dependency.marker = marker # type: ignore[assignment] + assert str(dependency.python_constraint) == expected + + +def test_dependency_markers_are_the_same_as_markers() -> None: + dependency = Dependency.create_from_pep_508('foo ; extra=="bar"') + marker = parse_marker('extra=="bar"') + + assert dependency.marker == marker + + +def test_marker_properly_unsets_python_constraint() -> None: + dependency = Dependency("foo", "^1.2.3") + + dependency.marker = 'python_version >= "3.6"' # type: ignore[assignment] + assert str(dependency.python_constraint) == ">=3.6" + + dependency.marker = "*" # type: ignore[assignment] + assert str(dependency.python_constraint) == "*" + + +def test_create_from_pep_508_url_with_activated_extras() -> None: + dependency = Dependency.create_from_pep_508("name [fred,bar] @ http://foo.com") + assert dependency.extras == {"fred", "bar"} + + +@pytest.mark.parametrize( + "dependency1, dependency2, expected", + [ + (Dependency("a", "1.0"), Dependency("a", "1.0"), True), + (Dependency("a", "1.0"), Dependency("a", "1.0.1"), False), + (Dependency("a", "1.0"), Dependency("a1", "1.0"), False), + (Dependency("a", "1.0"), Dependency("a", "1.0", source_type="file"), False), + # constraint is implicitly given for direct origin dependencies, + # but might not be set + ( + Dependency("a", "1.0", source_type="file"), + Dependency("a", "*", source_type="file"), + True, + ), + # constraint is not implicit for non direct origin dependencies + (Dependency("a", "1.0"), Dependency("a", "*"), False), + ( + Dependency("a", "1.0", source_type="legacy"), + Dependency("a", "*", source_type="legacy"), + False, + ), + ], +) +def test_eq(dependency1: Dependency, dependency2: Dependency, expected: bool) -> None: + assert (dependency1 == dependency2) is expected + assert (dependency2 == dependency1) is expected + + +@pytest.mark.parametrize( + "attr_name, value", + [ + ("constraint", "2.0"), + ("python_versions", "<3.8"), + ("transitive_python_versions", "<3.8"), + ("marker", "sys_platform == 'linux'"), + ("transitive_marker", "sys_platform == 'linux'"), + ], +) +def test_mutable_attributes_not_in_hash(attr_name: str, value: str) -> None: + dependency = Dependency("foo", "^1.2.3") + ref_hash = hash(dependency) + + ref_value = getattr(dependency, attr_name) + setattr(dependency, attr_name, value) + assert value != ref_value + assert hash(dependency) == ref_hash diff --git a/tests/packages/test_dependency_group.py b/tests/packages/test_dependency_group.py new file mode 100644 index 0000000..9f65acf --- /dev/null +++ b/tests/packages/test_dependency_group.py @@ -0,0 +1,26 @@ +from __future__ import annotations + +from poetry.core.packages.dependency import Dependency +from poetry.core.packages.dependency_group import DependencyGroup + + +def test_dependency_group_remove_dependency() -> None: + group = DependencyGroup(name="linter") + group.add_dependency(Dependency(name="black", constraint="*")) + group.add_dependency(Dependency(name="isort", constraint="*")) + group.add_dependency(Dependency(name="flake8", constraint="*")) + + assert {dependency.name for dependency in group.dependencies} == { + "black", + "isort", + "flake8", + } + + group.remove_dependency("isort") + assert {dependency.name for dependency in group.dependencies} == {"black", "flake8"} + + group.remove_dependency("black") + assert {dependency.name for dependency in group.dependencies} == {"flake8"} + + group.remove_dependency("flake8") + assert {dependency.name for 
dependency in group.dependencies} == set() diff --git a/tests/packages/test_directory_dependency.py b/tests/packages/test_directory_dependency.py new file mode 100644 index 0000000..00cda29 --- /dev/null +++ b/tests/packages/test_directory_dependency.py @@ -0,0 +1,143 @@ +from __future__ import annotations + +from pathlib import Path +from typing import cast + +import pytest + +from poetry.core.packages.dependency import Dependency +from poetry.core.packages.directory_dependency import DirectoryDependency + + +DIST_PATH = Path(__file__).parent.parent / "fixtures" / "git" / "github.com" / "demo" +SAMPLE_PROJECT = Path(__file__).parent.parent / "fixtures" / "sample_project" + + +def test_directory_dependency_must_exist() -> None: + with pytest.raises(ValueError): + DirectoryDependency("demo", DIST_PATH / "invalid") + + +def _test_directory_dependency_pep_508( + name: str, path: Path, pep_508_input: str, pep_508_output: str | None = None +) -> None: + dep = Dependency.create_from_pep_508( + pep_508_input, relative_to=Path(__file__).parent + ) + + assert dep.is_directory() + dep = cast("DirectoryDependency", dep) + assert dep.name == name + assert dep.path == path + assert dep.to_pep_508() == (pep_508_output or pep_508_input) + + +def test_directory_dependency_pep_508_local_absolute() -> None: + path = ( + Path(__file__).parent.parent + / "fixtures" + / "project_with_multi_constraints_dependency" + ) + expected = f"demo @ {path.as_uri()}" + + requirement = f"demo @ file://{path.as_posix()}" + _test_directory_dependency_pep_508("demo", path, requirement, expected) + + requirement = f"demo @ {path}" + _test_directory_dependency_pep_508("demo", path, requirement, expected) + + +def test_directory_dependency_pep_508_localhost() -> None: + path = ( + Path(__file__).parent.parent + / "fixtures" + / "project_with_multi_constraints_dependency" + ) + requirement = f"demo @ file://localhost{path.as_posix()}" + expected = f"demo @ {path.as_uri()}" + _test_directory_dependency_pep_508("demo", path, requirement, expected) + + +def test_directory_dependency_pep_508_local_relative() -> None: + path = Path("..") / "fixtures" / "project_with_multi_constraints_dependency" + + with pytest.raises(ValueError): + requirement = f"demo @ file://{path.as_posix()}" + _test_directory_dependency_pep_508("demo", path, requirement) + + requirement = f"demo @ {path}" + base = Path(__file__).parent + expected = f"demo @ {(base / path).resolve().as_uri()}" + _test_directory_dependency_pep_508("demo", path, requirement, expected) + + +def test_directory_dependency_pep_508_extras() -> None: + path = ( + Path(__file__).parent.parent + / "fixtures" + / "project_with_multi_constraints_dependency" + ) + requirement = f"demo[foo,bar] @ file://{path.as_posix()}" + expected = f"demo[bar,foo] @ {path.as_uri()}" + _test_directory_dependency_pep_508("demo", path, requirement, expected) + + +def test_directory_dependency_pep_508_with_marker() -> None: + path = ( + Path(__file__).parent.parent + / "fixtures" + / "project_with_multi_constraints_dependency" + ) + requirement = f'demo @ file://{path.as_posix()} ; sys_platform == "linux"' + expected = f'demo @ {path.as_uri()} ; sys_platform == "linux"' + _test_directory_dependency_pep_508("demo", path, requirement, expected) + + +@pytest.mark.parametrize( + "name,path,extras,constraint,expected", + [ + ( + "my-package", + SAMPLE_PROJECT, + None, + None, + f"my-package (*) @ {SAMPLE_PROJECT.as_uri()}", + ), + ( + "my-package", + SAMPLE_PROJECT, + ["db"], + "1.2", + f"my-package[db] (1.2) @ 
{SAMPLE_PROJECT.as_uri()}", + ), + ], +) +def test_directory_dependency_string_representation( + name: str, + path: Path, + extras: list[str] | None, + constraint: str | None, + expected: str, +) -> None: + dependency = DirectoryDependency(name=name, path=path, extras=extras) + if constraint: + dependency.constraint = constraint # type: ignore[assignment] + assert str(dependency) == expected + + +@pytest.mark.parametrize( + ("fixture", "name"), + [ + ("project_with_pep517_non_poetry", "PEP 517"), + ("project_with_setup_cfg_only", "setup.cfg"), + ], +) +def test_directory_dependency_non_poetry_pep517(fixture: str, name: str) -> None: + path = Path(__file__).parent.parent / "fixtures" / fixture + + try: + DirectoryDependency("package", path) + except ValueError as e: + if "does not seem to be a Python package" not in str(e): + raise e from e + pytest.fail(f"A {name} project not recognized as valid directory dependency") diff --git a/tests/packages/test_file_dependency.py b/tests/packages/test_file_dependency.py new file mode 100644 index 0000000..ee1b119 --- /dev/null +++ b/tests/packages/test_file_dependency.py @@ -0,0 +1,234 @@ +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING +from typing import cast + +import pytest + +from poetry.core.packages.dependency import Dependency +from poetry.core.packages.file_dependency import FileDependency +from poetry.core.version.markers import SingleMarker + + +if TYPE_CHECKING: + from pytest_mock import MockerFixture + + from poetry.core.version.markers import BaseMarker + +DIST_PATH = Path(__file__).parent.parent / "fixtures" / "distributions" +TEST_FILE = "demo-0.1.0.tar.gz" + + +def test_file_dependency_wrong_path() -> None: + with pytest.raises(ValueError): + FileDependency("demo", DIST_PATH / "demo-0.2.0.tar.gz") + + +def test_file_dependency_dir() -> None: + with pytest.raises(ValueError): + FileDependency("demo", DIST_PATH) + + +def test_default_hash() -> None: + path = DIST_PATH / TEST_FILE + dep = FileDependency("demo", path) + sha_256 = "72e8531e49038c5f9c4a837b088bfcb8011f4a9f76335c8f0654df6ac539b3d6" + assert dep.hash() == sha_256 + + +try: + from hashlib import algorithms_guaranteed +except ImportError: + algorithms_guaranteed = {"md5", "sha1", "sha224", "sha256", "sha384", "sha512"} + + +@pytest.mark.parametrize( + "hash_name,expected", + [ + (hash_name, value) + for hash_name, value in [ + ("sha224", "972d02f36539a98599aed0566bc8aaf3e6701f4e895dd797d8f5248e"), + ( + "sha3_512", + "c04ee109ae52d6440445e24dbd6d244a1d0f0289ef79cb7ba9bc3c139c0237169af9a8f61cd1cf4fc17f853ddf84f97c475ac5bb6c91a4aff0b825b884d4896c", + ), + ( + "blake2s", + "c336ecbc9d867c9d860accfba4c3723c51c4b5c47a1e0a955e1c8df499e36741", + ), + ( + "sha3_384", + "d4abb2459941369aabf8880c5287b7eeb80678e14f13c71b9ecf64c772029dc3f93939590bea9ecdb51a1d1a74fefc5a", + ), + ( + "blake2b", + "48e70abac547ab38e2330e6e6743a0c0f6274dcaa6df2c98135a78a9dd5b04a072d551fc3851b34da03eb0bf50dd71c7f32a8c36956e99fd6c66491bc7844800", + ), + ( + "sha256", + "72e8531e49038c5f9c4a837b088bfcb8011f4a9f76335c8f0654df6ac539b3d6", + ), + ( + "sha512", + "e08a00a4b86358e49a318e7e3ba7a3d2fabdd17a2fef95559a0af681ea07ab1296b0b8e11e645297da296290661dc07ae3c8f74eab66bd18a80dce0c0ccb355b", + ), + ( + "sha384", + "aa3144e28c6700a83247e8ec8711af5d3f5f75997990d48ec41e66bd275b3d0e19ee6f2fe525a358f874aa717afd06a9", + ), + ("sha3_224", "64bfc6e4125b4c6d67fd88ad1c7d1b5c4dc11a1970e433cd576c91d4"), + ("sha1", "4c057579005ac3e68e951a11ffdc4b27c6ae16af"), + ( + "sha3_256", + 
"ba3d2a964b0680b6dc9565a03952e29c294c785d5a2307d3e2d785d73b75ed7e", + ), + ] + if hash_name in algorithms_guaranteed + ], +) +def test_guaranteed_hash(hash_name: str, expected: str) -> None: + path = DIST_PATH / TEST_FILE + dep = FileDependency("demo", path) + assert dep.hash(hash_name) == expected + + +def _test_file_dependency_pep_508( + mocker: MockerFixture, + name: str, + path: Path, + pep_508_input: str, + pep_508_output: str | None = None, + marker: BaseMarker | None = None, +) -> None: + mocker.patch.object(Path, "exists").return_value = True + mocker.patch.object(Path, "is_file").return_value = True + + dep = Dependency.create_from_pep_508( + pep_508_input, relative_to=Path(__file__).parent + ) + if marker: + dep.marker = marker + + assert dep.is_file() + dep = cast("FileDependency", dep) + assert dep.name == name + assert dep.path == path + assert dep.to_pep_508() == (pep_508_output or pep_508_input) + + +def test_file_dependency_pep_508_local_file_absolute(mocker: MockerFixture) -> None: + path = DIST_PATH / "demo-0.2.0.tar.gz" + expected = f"demo @ {path.as_uri()}" + + requirement = f"demo @ file://{path.as_posix()}" + _test_file_dependency_pep_508(mocker, "demo", path, requirement, expected) + + requirement = f"demo @ {path}" + _test_file_dependency_pep_508(mocker, "demo", path, requirement, expected) + + +def test_file_dependency_pep_508_local_file_localhost(mocker: MockerFixture) -> None: + path = DIST_PATH / "demo-0.2.0.tar.gz" + requirement = f"demo @ file://localhost{path.as_posix()}" + expected = f"demo @ {path.as_uri()}" + _test_file_dependency_pep_508(mocker, "demo", path, requirement, expected) + + +def test_file_dependency_pep_508_local_file_relative_path( + mocker: MockerFixture, +) -> None: + path = Path("..") / "fixtures" / "distributions" / "demo-0.2.0.tar.gz" + + with pytest.raises(ValueError): + requirement = f"demo @ file://{path.as_posix()}" + _test_file_dependency_pep_508(mocker, "demo", path, requirement) + + requirement = f"demo @ {path}" + base = Path(__file__).parent + expected = f"demo @ {(base / path).resolve().as_uri()}" + _test_file_dependency_pep_508(mocker, "demo", path, requirement, expected) + + +def test_absolute_file_dependency_to_pep_508_with_marker(mocker: MockerFixture) -> None: + wheel = "demo-0.1.0-py2.py3-none-any.whl" + + abs_path = DIST_PATH / wheel + requirement = f'demo @ {abs_path.as_uri()} ; sys_platform == "linux"' + _test_file_dependency_pep_508( + mocker, + "demo", + abs_path, + requirement, + marker=SingleMarker("sys.platform", "linux"), + ) + + +def test_relative_file_dependency_to_pep_508_with_marker(mocker: MockerFixture) -> None: + wheel = "demo-0.1.0-py2.py3-none-any.whl" + + rel_path = Path("..") / "fixtures" / "distributions" / wheel + requirement = f'demo @ {rel_path.as_posix()} ; sys_platform == "linux"' + base = Path(__file__).parent + expected = ( + f'demo @ {(base / rel_path).resolve().as_uri()} ; sys_platform == "linux"' + ) + _test_file_dependency_pep_508( + mocker, + "demo", + rel_path, + requirement, + expected, + marker=SingleMarker("sys.platform", "linux"), + ) + + +def test_file_dependency_pep_508_extras(mocker: MockerFixture) -> None: + wheel = "demo-0.1.0-py2.py3-none-any.whl" + + rel_path = Path("..") / "fixtures" / "distributions" / wheel + requirement = f'demo[foo,bar] @ {rel_path.as_posix()} ; sys_platform == "linux"' + base = Path(__file__).parent + expected = ( + f"demo[bar,foo] @ {(base / rel_path).resolve().as_uri()} ;" + ' sys_platform == "linux"' + ) + _test_file_dependency_pep_508( + mocker, + 
"demo", + rel_path, + requirement, + expected, + ) + + +@pytest.mark.parametrize( + "name,path,extras,constraint,expected", + [ + ( + "demo", + DIST_PATH / TEST_FILE, + None, + None, + f"demo (*) @ {(DIST_PATH / TEST_FILE).as_uri()}", + ), + ( + "demo", + DIST_PATH / TEST_FILE, + ["foo"], + "1.2", + f"demo[foo] (1.2) @ {(DIST_PATH / TEST_FILE).as_uri()}", + ), + ], +) +def test_file_dependency_string_representation( + name: str, + path: Path, + extras: list[str] | None, + constraint: str | None, + expected: str, +) -> None: + dependency = FileDependency(name=name, path=path, extras=extras) + if constraint: + dependency.constraint = constraint # type: ignore[assignment] + assert str(dependency) == expected diff --git a/tests/packages/test_main.py b/tests/packages/test_main.py new file mode 100644 index 0000000..2dd465b --- /dev/null +++ b/tests/packages/test_main.py @@ -0,0 +1,331 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +from typing import cast + +from poetry.core.constraints.version import Version +from poetry.core.packages.dependency import Dependency + + +if TYPE_CHECKING: + from poetry.core.packages.url_dependency import URLDependency + from poetry.core.packages.vcs_dependency import VCSDependency + + +def test_dependency_from_pep_508() -> None: + name = "requests" + dep = Dependency.create_from_pep_508(name) + + assert dep.name == name + assert str(dep.constraint) == "*" + + +def test_dependency_from_pep_508_with_version() -> None: + name = "requests==2.18.0" + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "requests" + assert str(dep.constraint) == "2.18.0" + + +def test_dependency_from_pep_508_with_parens() -> None: + name = "requests (==2.18.0)" + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "requests" + assert str(dep.constraint) == "2.18.0" + + +def test_dependency_from_pep_508_with_constraint() -> None: + name = "requests>=2.12.0,!=2.17.*,<3.0" + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "requests" + assert str(dep.constraint) == ">=2.12.0,<2.17.0 || >=2.18.0,<3.0" + + +def test_dependency_from_pep_508_with_extras() -> None: + name = 'requests==2.18.0; extra == "foo" or extra == "bar"' + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "requests" + assert str(dep.constraint) == "2.18.0" + assert dep.in_extras == ["foo", "bar"] + assert str(dep.marker) == 'extra == "foo" or extra == "bar"' + + +def test_dependency_from_pep_508_with_python_version() -> None: + name = 'requests (==2.18.0); python_version == "2.7" or python_version == "2.6"' + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "requests" + assert str(dep.constraint) == "2.18.0" + assert dep.extras == frozenset() + assert dep.python_versions == "~2.7 || ~2.6" + assert str(dep.marker) == 'python_version == "2.7" or python_version == "2.6"' + + +def test_dependency_from_pep_508_with_single_python_version() -> None: + name = 'requests (==2.18.0); python_version == "2.7"' + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "requests" + assert str(dep.constraint) == "2.18.0" + assert dep.extras == frozenset() + assert dep.python_versions == "~2.7" + assert str(dep.marker) == 'python_version == "2.7"' + + +def test_dependency_from_pep_508_with_platform() -> None: + name = 'requests (==2.18.0); sys_platform == "win32" or sys_platform == "darwin"' + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "requests" + assert str(dep.constraint) == "2.18.0" + assert 
dep.extras == frozenset() + assert dep.python_versions == "*" + assert str(dep.marker) == 'sys_platform == "win32" or sys_platform == "darwin"' + + +def test_dependency_from_pep_508_complex() -> None: + name = ( + "requests (==2.18.0); " + 'python_version >= "2.7" and python_version != "3.2" ' + 'and (sys_platform == "win32" or sys_platform == "darwin") ' + 'and extra == "foo"' + ) + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "requests" + assert str(dep.constraint) == "2.18.0" + assert dep.in_extras == ["foo"] + assert dep.python_versions == ">=2.7 !=3.2.*" + assert ( + str(dep.marker) + == 'python_version >= "2.7" and python_version != "3.2" ' + 'and (sys_platform == "win32" or sys_platform == "darwin") ' + 'and extra == "foo"' + ) + + +def test_dependency_python_version_in() -> None: + name = "requests (==2.18.0); python_version in '3.3 3.4 3.5'" + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "requests" + assert str(dep.constraint) == "2.18.0" + assert dep.python_versions == "3.3.* || 3.4.* || 3.5.*" + assert str(dep.marker) == 'python_version in "3.3 3.4 3.5"' + + +def test_dependency_python_version_in_comma() -> None: + name = "requests (==2.18.0); python_version in '3.3, 3.4, 3.5'" + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "requests" + assert str(dep.constraint) == "2.18.0" + assert dep.python_versions == "3.3.* || 3.4.* || 3.5.*" + assert str(dep.marker) == 'python_version in "3.3, 3.4, 3.5"' + + +def test_dependency_platform_in() -> None: + name = "requests (==2.18.0); sys_platform in 'win32 darwin'" + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "requests" + assert str(dep.constraint) == "2.18.0" + assert str(dep.marker) == 'sys_platform in "win32 darwin"' + + +def test_dependency_with_extra() -> None: + name = "requests[security] (==2.18.0)" + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "requests" + assert str(dep.constraint) == "2.18.0" + + assert len(dep.extras) == 1 + assert "security" in dep.extras + + +def test_dependency_from_pep_508_with_python_version_union_of_multi() -> None: + name = ( + "requests (==2.18.0); " + '(python_version >= "2.7" and python_version < "2.8") ' + 'or (python_version >= "3.4" and python_version < "3.5")' + ) + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "requests" + assert str(dep.constraint) == "2.18.0" + assert dep.extras == frozenset() + assert dep.python_versions == ">=2.7 <2.8 || >=3.4 <3.5" + assert ( + str(dep.marker) + == 'python_version >= "2.7" and python_version < "2.8" ' + 'or python_version >= "3.4" and python_version < "3.5"' + ) + + +def test_dependency_from_pep_508_with_not_in_op_marker() -> None: + name = ( + 'jinja2 (>=2.7,<2.8); python_version not in "3.0,3.1,3.2" and extra == "export"' + ) + + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "jinja2" + assert str(dep.constraint) == ">=2.7,<2.8" + assert dep.in_extras == ["export"] + assert dep.python_versions == "!=3.0.*, !=3.1.*, !=3.2.*" + assert ( + str(dep.marker) == 'python_version not in "3.0,3.1,3.2" and extra == "export"' + ) + + +def test_dependency_from_pep_508_with_git_url() -> None: + name = "django-utils @ git+ssh://git@corp-gitlab.com/corp-utils.git@1.2" + + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "django-utils" + assert dep.is_vcs() + dep = cast("VCSDependency", dep) + assert dep.vcs == "git" + assert dep.source == "ssh://git@corp-gitlab.com/corp-utils.git" + assert dep.reference == "1.2" 
+ + +def test_dependency_from_pep_508_with_git_url_and_subdirectory() -> None: + name = ( + "django-utils @" + " git+ssh://git@corp-gitlab.com/corp-utils.git@1.2#subdirectory=package-dir" + ) + + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "django-utils" + assert dep.is_vcs() + dep = cast("VCSDependency", dep) + assert dep.vcs == "git" + assert dep.source == "ssh://git@corp-gitlab.com/corp-utils.git" + assert dep.reference == "1.2" + assert dep.directory == "package-dir" + + +def test_dependency_from_pep_508_with_git_url_and_comment_and_extra() -> None: + name = ( + "poetry @ git+https://github.com/python-poetry/poetry.git@b;ar;#egg=poetry" + ' ; extra == "foo;"' + ) + + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "poetry" + assert dep.is_vcs() + dep = cast("VCSDependency", dep) + assert dep.vcs == "git" + assert dep.source == "https://github.com/python-poetry/poetry.git" + assert dep.reference == "b;ar;" + assert dep.in_extras == ["foo;"] + + +def test_dependency_from_pep_508_with_url() -> None: + name = "django-utils @ https://example.com/django-utils-1.0.0.tar.gz" + + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "django-utils" + assert dep.is_url() + dep = cast("URLDependency", dep) + assert dep.url == "https://example.com/django-utils-1.0.0.tar.gz" + + +def test_dependency_from_pep_508_with_url_and_subdirectory() -> None: + name = ( + "django-utils @" + " https://example.com/django-utils-1.0.0.tar.gz#subdirectory=django" + ) + + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "django-utils" + assert dep.is_url() + dep = cast("URLDependency", dep) + assert dep.url == "https://example.com/django-utils-1.0.0.tar.gz" + assert dep.directory == "django" + + +def test_dependency_from_pep_508_with_wheel_url() -> None: + name = ( + "example_wheel @ https://example.com/example_wheel-14.0.2-py2.py3-none-any.whl" + ) + + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "example-wheel" + assert str(dep.constraint) == "14.0.2" + + +def test_dependency_from_pep_508_with_python_full_version() -> None: + name = ( + "requests (==2.18.0); " + '(python_version >= "2.7" and python_version < "2.8") ' + 'or (python_full_version >= "3.4" and python_full_version < "3.5.4")' + ) + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "requests" + assert str(dep.constraint) == "2.18.0" + assert dep.extras == frozenset() + assert dep.python_versions == ">=2.7 <2.8 || >=3.4 <3.5.4" + assert ( + str(dep.marker) + == 'python_version >= "2.7" and python_version < "2.8" ' + 'or python_full_version >= "3.4" and python_full_version < "3.5.4"' + ) + + +def test_dependency_from_pep_508_with_python_full_version_pep440_compatible_release_astrix() -> ( + None +): + name = 'pathlib2 ; python_version == "3.4.*" or python_version < "3"' + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "pathlib2" + assert str(dep.constraint) == "*" + assert dep.python_versions == "==3.4.* || <3" + + +def test_dependency_from_pep_508_with_python_full_version_pep440_compatible_release_tilde() -> ( + None +): + name = 'pathlib2 ; python_version ~= "3.4" or python_version < "3"' + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "pathlib2" + assert str(dep.constraint) == "*" + assert dep.python_versions == "~=3.4 || <3" + + +def test_dependency_from_pep_508_should_not_produce_empty_constraints_for_correct_markers() -> ( + None +): + name = ( + 'pytest-mypy; python_implementation != "PyPy" and 
python_version <= "3.10" and' + ' python_version > "3"' + ) + dep = Dependency.create_from_pep_508(name) + + assert dep.name == "pytest-mypy" + assert str(dep.constraint) == "*" + assert dep.python_versions == "<3.11 >=3" + assert dep.python_constraint.allows(Version.parse("3.6")) + assert dep.python_constraint.allows(Version.parse("3.10.4")) + assert dep.python_constraint.allows(Version.parse("3")) + assert dep.python_constraint.allows(Version.parse("3.0.1")) + assert ( + str(dep.marker) + == 'platform_python_implementation != "PyPy" and python_version <= "3.10" and' + ' python_version > "3"' + ) diff --git a/tests/packages/test_package.py b/tests/packages/test_package.py new file mode 100644 index 0000000..998b25e --- /dev/null +++ b/tests/packages/test_package.py @@ -0,0 +1,681 @@ +from __future__ import annotations + +import random + +from pathlib import Path +from typing import TYPE_CHECKING +from typing import cast + +import pytest + +from poetry.core.constraints.version import Version +from poetry.core.constraints.version.exceptions import ParseConstraintError +from poetry.core.factory import Factory +from poetry.core.packages.dependency import Dependency +from poetry.core.packages.dependency_group import DependencyGroup +from poetry.core.packages.package import Package +from poetry.core.packages.project_package import ProjectPackage +from poetry.core.version.exceptions import InvalidVersion + + +if TYPE_CHECKING: + from poetry.core.packages.directory_dependency import DirectoryDependency + from poetry.core.packages.file_dependency import FileDependency + from poetry.core.packages.url_dependency import URLDependency + from poetry.core.packages.vcs_dependency import VCSDependency + + +@pytest.fixture() +def package_with_groups() -> Package: + package = Package("foo", "1.2.3") + + optional_group = DependencyGroup("optional", optional=True) + optional_group.add_dependency(Factory.create_dependency("bam", "^3.0.0")) + + package.add_dependency(Factory.create_dependency("bar", "^1.0.0")) + package.add_dependency(Factory.create_dependency("baz", "^1.1.0")) + package.add_dependency(Factory.create_dependency("bim", "^2.0.0", groups=["dev"])) + package.add_dependency_group(optional_group) + + return package + + +def test_package_authors() -> None: + package = Package("foo", "0.1.0") + + package.authors.append("Sébastien Eustace <sebastien@eustace.io>") + assert package.author_name == "Sébastien Eustace" + assert package.author_email == "sebastien@eustace.io" + + package.authors.insert(0, "John Doe") + assert package.author_name == "John Doe" + assert package.author_email is None + + +def test_package_authors_invalid() -> None: + package = Package("foo", "0.1.0") + + package.authors.insert(0, "<John Doe") + with pytest.raises(ValueError) as e: + package.author_name + + assert ( + str(e.value) + == "Invalid author string. Must be in the format: John Smith <john@example.com>" + ) + + +@pytest.mark.parametrize( + ("name", "email"), + [ + ("Sébastien Eustace", "sebastien@eustace.io"), + ("John Doe", None), + ("'Jane Doe'", None), + ('"Jane Doe"', None), + ("MyCompany", None), + ("Some Company’s", None), + ("MyCompany's R&D", "rnd@MyCompanyName.MyTLD"), + ("Doe, John", None), + ("(Doe, John)", None), + ("John Doe", "john@john.doe"), + ("Doe, John", "dj@john.doe"), + ("MyCompanyName R&D", "rnd@MyCompanyName.MyTLD"), + ("John-Paul: Doe", None), + ("John-Paul: Doe", "jp@nomail.none"), + ("John Doe the 3rd", "3rd@jd.net"), + ], +) +def test_package_authors_valid(name: str, email: str | None) -> None: + package = Package("foo", "0.1.0") + + if email is None: + author = name + else: + author = f"{name} <{email}>" + package.authors.insert(0, author) + assert package.author_name == name + assert 
package.author_email == email + + +@pytest.mark.parametrize( + ("name",), + [ + ("<john@john.doe>",), + ("john@john.doe",), + ("<john@john.doe",), + ], +) +def test_package_author_names_invalid(name: str) -> None: + package = Package("foo", "0.1.0") + + package.authors.insert(0, name) + with pytest.raises(ValueError): + package.author_name + + +@pytest.mark.parametrize("groups", [["main"], ["dev"]]) +def test_package_add_dependency_vcs_groups(groups: list[str], f: Factory) -> None: + package = Package("foo", "0.1.0") + + dependency = package.add_dependency( + f.create_dependency( + "poetry", + {"git": "https://github.com/python-poetry/poetry.git"}, + groups=groups, + ) + ) + assert dependency.groups == frozenset(groups) + + +def test_package_add_dependency_vcs_groups_default_main(f: Factory) -> None: + package = Package("foo", "0.1.0") + + dependency = package.add_dependency( + f.create_dependency( + "poetry", {"git": "https://github.com/python-poetry/poetry.git"} + ) + ) + assert dependency.groups == frozenset(["main"]) + + +@pytest.mark.parametrize("groups", [["main"], ["dev"]]) +@pytest.mark.parametrize("optional", [True, False]) +def test_package_url_groups_optional( + groups: list[str], optional: bool, f: Factory +) -> None: + package = Package("foo", "0.1.0") + + dependency = package.add_dependency( + f.create_dependency( + "poetry", + { + "url": "https://github.com/python-poetry/poetry/releases/download/1.0.5/poetry-1.0.5-linux.tar.gz", + "optional": optional, + }, + groups=groups, + ) + ) + assert dependency.groups == frozenset(groups) + assert dependency.is_optional() == optional + + +def test_package_equality_simple() -> None: + assert Package("foo", "0.1.0") == Package("foo", "0.1.0") + assert Package("foo", "0.1.0") != Package("foo", "0.1.1") + assert Package("bar", "0.1.0") != Package("foo", "0.1.0") + + +def test_package_equality_source_type() -> None: + a1 = Package("a", "0.1.0", source_type="file") + a2 = Package(a1.name, a1.version, source_type="directory") + a3 = Package(a1.name, a1.version, source_type=a1.source_type) + a4 = Package(a1.name, a1.version) + + assert a1 == a1 + assert a1 == a3 + assert a1 != a2 + assert a2 != a3 + assert a1 != a4 + assert a2 != a4 + + +def test_package_equality_source_url() -> None: + a1 = Package("a", "0.1.0", source_type="file", source_url="/some/path") + a2 = Package( + a1.name, a1.version, source_type=a1.source_type, source_url="/some/other/path" + ) + a3 = Package( + a1.name, a1.version, source_type=a1.source_type, source_url=a1.source_url + ) + a4 = Package(a1.name, a1.version, source_type=a1.source_type) + + assert a1 == a1 + assert a1 == a3 + assert a1 != a2 + assert a2 != a3 + assert a1 != a4 + assert a2 != a4 + + +def test_package_equality_source_reference() -> None: + a1 = Package( + "a", + "0.1.0", + source_type="git", + source_url="https://foo.bar", + source_reference="c01b317af582501c5ba07b23d5bef3fbada2d4ef", + ) + a2 = Package( + a1.name, + a1.version, + source_type="git", + source_url="https://foo.bar", + source_reference="a444731cd243cb5cd04e4d5fb81f86e1fecf8a00", + ) + a3 = Package( + a1.name, + a1.version, + source_type="git", + source_url="https://foo.bar", + source_reference="c01b317af582501c5ba07b23d5bef3fbada2d4ef", + ) + a4 = Package(a1.name, a1.version, source_type="git") + + assert a1 == a1 + assert a1 == a3 + assert a1 != a2 + assert a2 != a3 + assert a1 != a4 + assert a2 != a4 + + +def test_package_resolved_reference_is_relevant_for_equality_only_if_present_for_both_packages() -> ( + None +): + a1 = Package( + "a", + "0.1.0", + source_type="git", + source_url="https://foo.bar", + 
source_reference="master", + source_resolved_reference="c01b317af582501c5ba07b23d5bef3fbada2d4ef", + ) + a2 = Package( + a1.name, + a1.version, + source_type="git", + source_url="https://foo.bar", + source_reference="master", + source_resolved_reference="a444731cd243cb5cd04e4d5fb81f86e1fecf8a00", + ) + a3 = Package( + a1.name, + a1.version, + source_type="git", + source_url="https://foo.bar", + source_reference="master", + source_resolved_reference="c01b317af582501c5ba07b23d5bef3fbada2d4ef", + ) + a4 = Package( + a1.name, + a1.version, + source_type="git", + source_url="https://foo.bar", + source_reference="master", + ) + + assert a1 == a1 + assert a1 == a3 + assert a1 != a2 + assert a2 != a3 + assert a1 == a4 + assert a2 == a4 + + +def test_package_equality_source_subdirectory() -> None: + a1 = Package( + "a", + "0.1.0", + source_type="git", + source_url="https://foo.bar", + source_subdirectory="baz", + ) + a2 = Package( + a1.name, + a1.version, + source_type="git", + source_url="https://foo.bar", + source_subdirectory="qux", + ) + a3 = Package( + a1.name, + a1.version, + source_type="git", + source_url="https://foo.bar", + source_subdirectory="baz", + ) + a4 = Package(a1.name, a1.version, source_type="git") + + assert a1 == a3 + assert a1 != a2 + assert a2 != a3 + assert a1 != a4 + assert a2 != a4 + + +def test_complete_name() -> None: + assert Package("foo", "1.2.3").complete_name == "foo" + assert ( + Package("foo", "1.2.3", features=["baz", "bar"]).complete_name == "foo[bar,baz]" + ) + + +def test_to_dependency() -> None: + package = Package("foo", "1.2.3") + dep = package.to_dependency() + + assert dep.name == "foo" + assert dep.constraint == package.version + + +def test_to_dependency_with_python_constraint() -> None: + package = Package("foo", "1.2.3") + package.python_versions = ">=3.6" + dep = package.to_dependency() + + assert dep.name == "foo" + assert dep.constraint == package.version + assert dep.python_versions == ">=3.6" + + +def test_to_dependency_with_features() -> None: + package = Package("foo", "1.2.3", features=["baz", "bar"]) + dep = package.to_dependency() + + assert dep.name == "foo" + assert dep.constraint == package.version + assert dep.features == frozenset({"bar", "baz"}) + + +def test_to_dependency_for_directory() -> None: + path = Path(__file__).parent.parent.joinpath("fixtures/simple_project") + package = Package( + "foo", + "1.2.3", + source_type="directory", + source_url=path.as_posix(), + features=["baz", "bar"], + ) + dep = package.to_dependency() + + assert dep.name == "foo" + assert dep.constraint == package.version + assert dep.features == frozenset({"bar", "baz"}) + assert dep.is_directory() + dep = cast("DirectoryDependency", dep) + assert dep.path == path + assert dep.source_type == "directory" + assert dep.source_url == path.as_posix() + + +def test_to_dependency_for_file() -> None: + path = Path(__file__).parent.parent.joinpath( + "fixtures/distributions/demo-0.1.0.tar.gz" + ) + package = Package( + "foo", + "1.2.3", + source_type="file", + source_url=path.as_posix(), + features=["baz", "bar"], + ) + dep = package.to_dependency() + + assert dep.name == "foo" + assert dep.constraint == package.version + assert dep.features == frozenset({"bar", "baz"}) + assert dep.is_file() + dep = cast("FileDependency", dep) + assert dep.path == path + assert dep.source_type == "file" + assert dep.source_url == path.as_posix() + + +def test_to_dependency_for_url() -> None: + package = Package( + "foo", + "1.2.3", + source_type="url", + 
source_url="https://example.com/path.tar.gz", + source_subdirectory="qux", + features=["baz", "bar"], + ) + dep = package.to_dependency() + + assert dep.name == "foo" + assert dep.constraint == package.version + assert dep.features == frozenset({"bar", "baz"}) + assert dep.is_url() + dep = cast("URLDependency", dep) + assert dep.url == "https://example.com/path.tar.gz" + assert dep.source_type == "url" + assert dep.source_url == "https://example.com/path.tar.gz" + assert dep.source_subdirectory == "qux" + + +def test_to_dependency_for_vcs() -> None: + package = Package( + "foo", + "1.2.3", + source_type="git", + source_url="https://github.com/foo/foo.git", + source_reference="master", + source_resolved_reference="123456", + source_subdirectory="baz", + features=["baz", "bar"], + ) + dep = package.to_dependency() + + assert dep.name == "foo" + assert dep.constraint == package.version + assert dep.features == frozenset({"bar", "baz"}) + assert dep.is_vcs() + dep = cast("VCSDependency", dep) + assert dep.source_type == "git" + assert dep.source == "https://github.com/foo/foo.git" + assert dep.reference == "master" + assert dep.source_reference == "master" + assert dep.source_resolved_reference == "123456" + assert dep.directory == "baz" + assert dep.source_subdirectory == "baz" + + +def test_package_clone(f: Factory) -> None: + # TODO(nic): this test is not future-proof, in that any attributes added + # to the Package object and not filled out in this test setup might + # cause comparisons to match that otherwise should not. A factory method + # to create a Package object with all fields fully randomized would be the + # most rigorous test for this, but that's likely overkill. + p = Package( + "lol_wut", + "3.141.5926535", + pretty_version="③.⑭.⑮", + source_type="git", + source_url="http://some.url", + source_reference="fe4d2adabf3feb5d32b70ab5c105285fa713b10c", + source_resolved_reference="fe4d2adabf3feb5d32b70ab5c105285fa713b10c", + features=["abc", "def"], + develop=random.choice((True, False)), + ) + p.add_dependency(Factory.create_dependency("foo", "^1.2.3")) + p.add_dependency(Factory.create_dependency("foo", "^1.2.3", groups=["dev"])) + p.files = (["file1", "file2", "file3"],) # type: ignore[assignment] + p.homepage = "https://some.other.url" + p.repository_url = "http://bug.farm" + p.documentation_url = "http://lorem.ipsum/dolor/sit.amet" + p2 = p.clone() + + assert p == p2 + assert p.__dict__ == p2.__dict__ + assert len(p2.requires) == 1 + assert len(p2.all_requires) == 2 + + +def test_dependency_groups(package_with_groups: Package) -> None: + assert len(package_with_groups.requires) == 2 + assert len(package_with_groups.all_requires) == 4 + + +def test_without_dependency_groups(package_with_groups: Package) -> None: + package = package_with_groups.without_dependency_groups(["dev"]) + + assert len(package.requires) == 2 + assert len(package.all_requires) == 3 + + package = package_with_groups.without_dependency_groups(["dev", "optional"]) + + assert len(package.requires) == 2 + assert len(package.all_requires) == 2 + + +def test_with_dependency_groups(package_with_groups: Package) -> None: + package = package_with_groups.with_dependency_groups([]) + + assert len(package.requires) == 2 + assert len(package.all_requires) == 3 + + package = package_with_groups.with_dependency_groups(["optional"]) + + assert len(package.requires) == 2 + assert len(package.all_requires) == 4 + + +def test_without_optional_dependency_groups(package_with_groups: Package) -> None: + package = 
package_with_groups.without_optional_dependency_groups() + + assert len(package.requires) == 2 + assert len(package.all_requires) == 3 + + +def test_only_with_dependency_groups(package_with_groups: Package) -> None: + package = package_with_groups.with_dependency_groups(["dev"], only=True) + + assert len(package.requires) == 0 + assert len(package.all_requires) == 1 + + package = package_with_groups.with_dependency_groups(["dev", "optional"], only=True) + + assert len(package.requires) == 0 + assert len(package.all_requires) == 2 + + package = package_with_groups.with_dependency_groups(["main"], only=True) + + assert len(package.requires) == 2 + assert len(package.all_requires) == 2 + + +def test_get_readme_property_with_multiple_readme_files() -> None: + package = Package("foo", "0.1.0") + + package.readmes = (Path("README.md"), Path("HISTORY.md")) + with pytest.deprecated_call(): + assert package.readme == Path("README.md") + + +def test_set_readme_property() -> None: + package = Package("foo", "0.1.0") + + with pytest.deprecated_call(): + package.readme = Path("README.md") + + assert package.readmes == (Path("README.md"),) + with pytest.deprecated_call(): + assert package.readme == Path("README.md") + + +@pytest.mark.parametrize( + ("package", "dependency", "ignore_source_type", "result"), + [ + (Package("foo", "0.1.0"), Dependency("foo", ">=0.1.0"), False, True), + (Package("foo", "0.1.0"), Dependency("foo", "<0.1.0"), False, False), + ( + Package("foo", "0.1.0"), + Dependency("foo", ">=0.1.0", source_type="git"), + False, + False, + ), + ( + Package("foo", "0.1.0"), + Dependency("foo", ">=0.1.0", source_type="git"), + True, + True, + ), + ( + Package("foo", "0.1.0"), + Dependency("foo", "<0.1.0", source_type="git"), + True, + False, + ), + ], +) +def test_package_satisfies( + package: Package, dependency: Dependency, ignore_source_type: bool, result: bool +) -> None: + assert package.satisfies(dependency, ignore_source_type) == result + + +@pytest.mark.parametrize( + ("package_repo", "dependency_repo", "result"), + [ + ("pypi", None, True), + ("private", None, True), + ("pypi", "pypi", True), + ("private", "private", True), + ("pypi", "private", False), + ("private", "pypi", False), + ], +) +def test_package_satisfies_on_repositories( + package_repo: str, + dependency_repo: str | None, + result: bool, +) -> None: + source_type = None if package_repo == "pypi" else "legacy" + source_reference = None if package_repo == "pypi" else package_repo + package = Package( + "foo", "0.1.0", source_type=source_type, source_reference=source_reference + ) + + dependency = Dependency("foo", ">=0.1.0") + dependency.source_name = dependency_repo + + assert package.satisfies(dependency) == result + + +def test_package_pep592_default_not_yanked() -> None: + package = Package("foo", "1.0") + + assert not package.yanked + assert package.yanked_reason == "" + + +@pytest.mark.parametrize( + ("yanked", "expected_yanked", "expected_yanked_reason"), + [ + (True, True, ""), + (False, False, ""), + ("the reason", True, "the reason"), + ("", True, ""), + ], +) +def test_package_pep592_yanked( + yanked: str | bool, expected_yanked: bool, expected_yanked_reason: str +) -> None: + package = Package("foo", "1.0", yanked=yanked) + + assert package.yanked == expected_yanked + assert package.yanked_reason == expected_yanked_reason + + +def test_python_versions_are_made_precise() -> None: + package = Package("foo", "1.2.3") + package.python_versions = ">3.6,<=3.10" + + assert ( + str(package.python_marker) + == 
'python_full_version > "3.6.0" and python_full_version <= "3.10.0"' + ) + assert str(package.python_constraint) == ">3.6,<=3.10" + + +def test_cannot_update_package_version() -> None: + package = Package("foo", "1.2.3") + with pytest.raises(AttributeError): + package.version = "1.2.4" # type: ignore[misc,assignment] + + +def test_project_package_version_update_string() -> None: + package = ProjectPackage("foo", "1.2.3") + package.version = "1.2.4" # type: ignore[assignment] + assert package.version.text == "1.2.4" + + +def test_project_package_version_update_version() -> None: + package = ProjectPackage("foo", "1.2.3") + package.version = Version.parse("1.2.4") + assert package.version.text == "1.2.4" + + +def test_project_package_hash_not_changed_when_version_is_changed() -> None: + package = ProjectPackage("foo", "1.2.3") + package_hash = hash(package) + package_clone = package.clone() + assert package == package_clone + assert hash(package) == hash(package_clone) + + package.version = Version.parse("1.2.4") + + assert hash(package) == package_hash, "Hash must not change!" + assert hash(package_clone) == package_hash + assert package != package_clone + + +def test_package_invalid_version() -> None: + with pytest.raises(InvalidVersion) as exc_info: + Package("foo", "1.2.3.bogus") + + expected = "Invalid version '1.2.3.bogus' on package foo" + assert str(exc_info.value) == expected + + +def test_package_invalid_python_versions() -> None: + package = Package("foo", "1.2.3") + with pytest.raises(ParseConstraintError) as exc_info: + package.python_versions = ">=3.6.y" + + expected = "Invalid python versions '>=3.6.y' on foo (1.2.3)" + assert str(exc_info.value) == expected diff --git a/tests/packages/test_specification.py b/tests/packages/test_specification.py new file mode 100644 index 0000000..79ec305 --- /dev/null +++ b/tests/packages/test_specification.py @@ -0,0 +1,117 @@ +from __future__ import annotations + +import pytest + +from poetry.core.packages.specification import PackageSpecification + + +@pytest.mark.parametrize( + "spec1, spec2, expected", + [ + (PackageSpecification("a"), PackageSpecification("a"), True), + (PackageSpecification("a", "type1"), PackageSpecification("a", "type1"), True), + (PackageSpecification("a", "type1"), PackageSpecification("a", "type2"), False), + (PackageSpecification("a"), PackageSpecification("a", "type1"), False), + (PackageSpecification("a", "type1"), PackageSpecification("a"), False), + ], +) +def test_is_same_package_source_type( + spec1: PackageSpecification, + spec2: PackageSpecification, + expected: bool, +) -> None: + assert spec1.is_same_package_as(spec2) == expected + + +@pytest.mark.parametrize( + ("source_type", "result"), + [ + ("directory", True), + ("file", True), + ("url", True), + ("git", True), + ("legacy", False), + (None, False), + ], +) +def test_is_direct_origin(source_type: str | None, result: bool) -> None: + assert PackageSpecification("package", source_type).is_direct_origin() == result + + +@pytest.mark.parametrize( + "spec1, spec2, expected", + [ + (PackageSpecification("a"), PackageSpecification("a"), True), + (PackageSpecification("a"), PackageSpecification("b"), False), + (PackageSpecification("a", features=["x"]), PackageSpecification("a"), True), + ( + PackageSpecification("a", features=["x"]), + PackageSpecification("a", features=["x"]), + True, + ), + ( + PackageSpecification("a", features=["x"]), + PackageSpecification("b", features=["x"]), + False, + ), + ( + PackageSpecification("a", features=["x"]), + 
PackageSpecification("a", features=["y"]), + False, + ), + ( + PackageSpecification("a", features=["x"]), + PackageSpecification("a", features=["x", "y"]), + False, + ), + ( + PackageSpecification("a", features=["x", "y"]), + PackageSpecification("a", features=["x"]), + True, + ), + ], +) +def test_specification_provides( + spec1: PackageSpecification, + spec2: PackageSpecification, + expected: bool, +) -> None: + assert spec1.provides(spec2) == expected + + +@pytest.mark.parametrize( + "spec1, spec2", + [ + ( + # nothing except for name and features matters if no source + PackageSpecification("a", None, "url1", "ref1", "resref1", "sub1"), + PackageSpecification("a", None, "url2", "ref2", "resref2", "sub2"), + ), + ( + # ref does not matter if resolved ref is equal + PackageSpecification("a", "type", "url", "ref1", "resref1"), + PackageSpecification("a", "type", "url", "ref2", "resref1"), + ), + ( + # resolved ref does not matter if no ref + PackageSpecification("a", "type", "url", None, "resref1"), + PackageSpecification("a", "type", "url", None, "resref2"), + ), + ( + # resolved ref unset when ref starts with other + PackageSpecification("a", "type", "url", "ref/a", "resref1"), + PackageSpecification("a", "type", "url", "ref", None), + ), + ( + # resolved ref unset when ref starts with other + PackageSpecification("a", "type", "url", "ref/a", None), + PackageSpecification("a", "type", "url", "ref", "resref2"), + ), + ], +) +def test_equal_specifications_have_same_hash( + spec1: PackageSpecification, spec2: PackageSpecification +) -> None: + assert spec1 == spec2 + assert spec2 == spec1 + assert hash(spec1) == hash(spec2) diff --git a/tests/packages/test_url_dependency.py b/tests/packages/test_url_dependency.py new file mode 100644 index 0000000..bc8ce2a --- /dev/null +++ b/tests/packages/test_url_dependency.py @@ -0,0 +1,91 @@ +from __future__ import annotations + +import pytest + +from poetry.core.packages.url_dependency import URLDependency +from poetry.core.version.markers import SingleMarker + + +def test_to_pep_508() -> None: + dependency = URLDependency( + "pytorch", + "https://download.pytorch.org/whl/cpu/torch-1.5.1%2Bcpu-cp38-cp38-linux_x86_64.whl", + ) + + expected = ( + "pytorch @" + " https://download.pytorch.org/whl/cpu/torch-1.5.1%2Bcpu-cp38-cp38-linux_x86_64.whl" + ) + assert dependency.to_pep_508() == expected + + +def test_to_pep_508_with_extras() -> None: + dependency = URLDependency( + "pytorch", + "https://download.pytorch.org/whl/cpu/torch-1.5.1%2Bcpu-cp38-cp38-linux_x86_64.whl", + extras=["foo", "bar"], + ) + + expected = ( + "pytorch[bar,foo] @" + " https://download.pytorch.org/whl/cpu/torch-1.5.1%2Bcpu-cp38-cp38-linux_x86_64.whl" + ) + assert expected == dependency.to_pep_508() + + +def test_to_pep_508_with_subdirectory() -> None: + dependency = URLDependency( + "demo", + "https://github.com/foo/bar/archive/0.1.0.zip", + directory="baz", + ) + + expected = "demo @ https://github.com/foo/bar/archive/0.1.0.zip#subdirectory=baz" + assert expected == dependency.to_pep_508() + + +def test_to_pep_508_with_marker() -> None: + dependency = URLDependency( + "pytorch", + "https://download.pytorch.org/whl/cpu/torch-1.5.1%2Bcpu-cp38-cp38-linux_x86_64.whl", + ) + dependency.marker = SingleMarker("sys.platform", "linux") + + expected = ( + "pytorch @" + " https://download.pytorch.org/whl/cpu/torch-1.5.1%2Bcpu-cp38-cp38-linux_x86_64.whl" + ' ; sys_platform == "linux"' + ) + assert dependency.to_pep_508() == expected + + +@pytest.mark.parametrize( + 
"name,url,extras,constraint,expected", + [ + ( + "example", + "https://example.org/example.whl", + None, + None, + "example (*) @ https://example.org/example.whl", + ), + ( + "example", + "https://example.org/example.whl", + ["foo"], + "1.2", + "example[foo] (1.2) @ https://example.org/example.whl", + ), + ], +) +def test_directory_dependency_string_representation( + name: str, + url: str, + extras: list[str] | None, + constraint: str | None, + expected: str, +) -> None: + dependency = URLDependency(name=name, url=url, extras=extras) + if constraint: + dependency.constraint = constraint # type: ignore[assignment] + assert str(dependency) == expected diff --git a/tests/packages/test_vcs_dependency.py b/tests/packages/test_vcs_dependency.py new file mode 100644 index 0000000..0d9d019 --- /dev/null +++ b/tests/packages/test_vcs_dependency.py @@ -0,0 +1,178 @@ +from __future__ import annotations + +from typing import Any + +import pytest + +from packaging.utils import canonicalize_name + +from poetry.core.packages.vcs_dependency import VCSDependency + + +@pytest.mark.parametrize( + "kwargs, expected", + [ + ({}, "poetry @ git+https://github.com/python-poetry/poetry.git"), + ( + {"extras": ["foo"]}, + "poetry[foo] @ git+https://github.com/python-poetry/poetry.git", + ), + ( + {"extras": ["foo", "bar"]}, + "poetry[bar,foo] @ git+https://github.com/python-poetry/poetry.git", + ), + ( + {"branch": "main"}, + "poetry @ git+https://github.com/python-poetry/poetry.git@main", + ), + ( + {"tag": "1.0"}, + "poetry @ git+https://github.com/python-poetry/poetry.git@1.0", + ), + ( + {"rev": "12345"}, + "poetry @ git+https://github.com/python-poetry/poetry.git@12345", + ), + ( + {"directory": "sub"}, + "poetry @ git+https://github.com/python-poetry/poetry.git#subdirectory=sub", + ), + ( + {"branch": "main", "directory": "sub"}, + ( + "poetry @ git+https://github.com/python-poetry/poetry.git" + "@main#subdirectory=sub" + ), + ), + ], +) +def test_to_pep_508(kwargs: dict[str, Any], expected: str) -> None: + dependency = VCSDependency( + "poetry", "git", "https://github.com/python-poetry/poetry.git", **kwargs + ) + + assert dependency.to_pep_508() == expected + + +def test_to_pep_508_ssh() -> None: + dependency = VCSDependency("poetry", "git", "git@github.com:sdispater/poetry.git") + + expected = "poetry @ git+ssh://git@github.com/sdispater/poetry.git" + + assert dependency.to_pep_508() == expected + + +def test_to_pep_508_in_extras() -> None: + dependency = VCSDependency( + "poetry", "git", "https://github.com/python-poetry/poetry.git" + ) + dependency.in_extras.append(canonicalize_name("foo")) + + expected = ( + 'poetry @ git+https://github.com/python-poetry/poetry.git ; extra == "foo"' + ) + assert dependency.to_pep_508() == expected + + dependency = VCSDependency( + "poetry", "git", "https://github.com/python-poetry/poetry.git", extras=["bar"] + ) + dependency.in_extras.append(canonicalize_name("foo")) + + expected = ( + 'poetry[bar] @ git+https://github.com/python-poetry/poetry.git ; extra == "foo"' + ) + + assert dependency.to_pep_508() == expected + + dependency = VCSDependency( + "poetry", "git", "https://github.com/python-poetry/poetry.git", "b;ar;" + ) + dependency.in_extras.append(canonicalize_name("foo;")) + + expected = ( + "poetry @ git+https://github.com/python-poetry/poetry.git@b;ar; ; extra ==" + ' "foo;"' + ) + + assert dependency.to_pep_508() == expected + + +@pytest.mark.parametrize( + "name,source,branch,extras,constraint,expected", + [ + ( + "example", + "https://example.org/example.git", 
+ "main", + None, + None, + "example (*) @ git+https://example.org/example.git@main", + ), + ( + "example", + "https://example.org/example.git", + "main", + ["foo"], + "1.2", + "example[foo] (1.2) @ git+https://example.org/example.git@main", + ), + ], +) +def test_directory_dependency_string_representation( + name: str, + source: str, + branch: str, + extras: list[str] | None, + constraint: str | None, + expected: str, +) -> None: + dependency = VCSDependency( + name=name, vcs="git", source=source, branch=branch, extras=extras + ) + if constraint: + dependency.constraint = constraint # type: ignore[assignment] + assert str(dependency) == expected + + +@pytest.mark.parametrize("groups", [["main"], ["dev"]]) +def test_category(groups: list[str]) -> None: + dependency = VCSDependency( + "poetry", + "git", + "https://github.com/python-poetry/poetry.git", + groups=groups, + ) + assert dependency.groups == frozenset(groups) + + +def test_vcs_dependency_can_have_resolved_reference_specified() -> None: + dependency = VCSDependency( + "poetry", + "git", + "https://github.com/python-poetry/poetry.git", + branch="develop", + resolved_rev="123456", + ) + + assert dependency.branch == "develop" + assert dependency.source_reference == "develop" + assert dependency.source_resolved_reference == "123456" + + +def test_vcs_dependencies_are_equal_if_resolved_references_match() -> None: + dependency1 = VCSDependency( + "poetry", + "git", + "https://github.com/python-poetry/poetry.git", + branch="develop", + resolved_rev="123456", + ) + dependency2 = VCSDependency( + "poetry", + "git", + "https://github.com/python-poetry/poetry.git", + rev="123", + resolved_rev="123456", + ) + + assert dependency1 == dependency2 diff --git a/tests/packages/utils/__init__.py b/tests/packages/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/packages/utils/test_utils.py b/tests/packages/utils/test_utils.py new file mode 100644 index 0000000..a994975 --- /dev/null +++ b/tests/packages/utils/test_utils.py @@ -0,0 +1,250 @@ +from __future__ import annotations + +from pathlib import Path + +import pytest + +from poetry.core.constraints.generic import parse_constraint as parse_generic_constraint +from poetry.core.constraints.version import parse_constraint as parse_version_constraint +from poetry.core.packages.utils.utils import convert_markers +from poetry.core.packages.utils.utils import create_nested_marker +from poetry.core.packages.utils.utils import get_python_constraint_from_marker +from poetry.core.packages.utils.utils import is_python_project +from poetry.core.version.markers import parse_marker + + +@pytest.mark.parametrize( + "marker, expected", + [ + ( + ( + 'sys_platform == "win32" and python_version < "3.6" or sys_platform ==' + ' "linux" and python_version < "3.6" and python_version >= "3.3" or' + ' sys_platform == "darwin" and python_version < "3.3"' + ), + { + "python_version": [ + [("<", "3.6")], + [("<", "3.6"), (">=", "3.3")], + [("<", "3.3")], + ], + "sys_platform": [ + [("==", "win32")], + [("==", "linux")], + [("==", "darwin")], + ], + }, + ), + ( + ( + 'sys_platform == "win32" and python_version < "3.6" or sys_platform ==' + ' "win32" and python_version < "3.6" and python_version >= "3.3" or' + ' sys_platform == "win32" and python_version < "3.3"' + ), + {"python_version": [[("<", "3.6")]], "sys_platform": [[("==", "win32")]]}, + ), + ( + 'python_version == "2.7" or python_version == "2.6"', + {"python_version": [[("==", "2.7")], [("==", "2.6")]]}, + ), + ( + ( + '(python_version 
< "2.7" or python_full_version >= "3.0.0") and' + ' python_full_version < "3.6.0"' + ), + {"python_version": [[("<", "2.7")], [(">=", "3.0.0"), ("<", "3.6.0")]]}, + ), + ( + ( + '(python_version < "2.7" or python_full_version >= "3.0.0") and' + ' extra == "foo"' + ), + { + "extra": [[("==", "foo")]], + "python_version": [[("<", "2.7")], [(">=", "3.0.0")]], + }, + ), + ( + 'python_version >= "3.9" or sys_platform == "linux"', + { + "python_version": [[(">=", "3.9")], []], + "sys_platform": [[], [("==", "linux")]], + }, + ), + ( + 'python_version >= "3.9" and sys_platform == "linux"', + { + "python_version": [[(">=", "3.9")]], + "sys_platform": [[("==", "linux")]], + }, + ), + ], +) +def test_convert_markers( + marker: str, expected: dict[str, list[list[tuple[str, str]]]] +) -> None: + parsed_marker = parse_marker(marker) + converted = convert_markers(parsed_marker) + assert converted == expected + + +@pytest.mark.parametrize( + ["constraint", "expected"], + [ + ("*", ""), + ("==linux", 'sys_platform == "linux"'), + ("!=win32", 'sys_platform != "win32"'), + ("!=linux, !=win32", 'sys_platform != "linux" and sys_platform != "win32"'), + ("==linux || ==win32", 'sys_platform == "linux" or sys_platform == "win32"'), + ], +) +def test_create_nested_marker_base_constraint(constraint: str, expected: str) -> None: + assert ( + create_nested_marker("sys_platform", parse_generic_constraint(constraint)) + == expected + ) + + +@pytest.mark.parametrize( + ["constraint", "expected"], + [ + ("*", ""), + # simple version + ("3", 'python_version == "3"'), + ("3.9", 'python_version == "3.9"'), + ("3.9.0", 'python_full_version == "3.9.0"'), + ("3.9.1", 'python_full_version == "3.9.1"'), + # min + (">=3", 'python_version >= "3"'), + (">=3.9", 'python_version >= "3.9"'), + (">=3.9.0", 'python_full_version >= "3.9.0"'), + (">=3.9.1", 'python_full_version >= "3.9.1"'), + (">3", 'python_full_version > "3.0.0"'), + (">3.9", 'python_full_version > "3.9.0"'), + (">3.9.0", 'python_full_version > "3.9.0"'), + (">3.9.1", 'python_full_version > "3.9.1"'), + # max + ("<3", 'python_version < "3"'), + ("<3.9", 'python_version < "3.9"'), + ("<3.9.0", 'python_full_version < "3.9.0"'), + ("<3.9.1", 'python_full_version < "3.9.1"'), + ("<=3", 'python_full_version <= "3.0.0"'), + ("<=3.9", 'python_full_version <= "3.9.0"'), + ("<=3.9.0", 'python_full_version <= "3.9.0"'), + ("<=3.9.1", 'python_full_version <= "3.9.1"'), + # min and max + (">=3.7, <3.9", 'python_version >= "3.7" and python_version < "3.9"'), + (">=3.7, <=3.9", 'python_version >= "3.7" and python_full_version <= "3.9.0"'), + (">3.7, <3.9", 'python_full_version > "3.7.0" and python_version < "3.9"'), + ( + ">3.7, <=3.9", + 'python_full_version > "3.7.0" and python_full_version <= "3.9.0"', + ), + # union + ("<3.7 || >=3.8", '(python_version < "3.7") or (python_version >= "3.8")'), + ( + ">=3.7,<3.8 || >=3.9,<=3.10", + ( + '(python_version >= "3.7" and python_version < "3.8")' + ' or (python_version >= "3.9" and python_full_version <= "3.10.0")' + ), + ), + ], +) +def test_create_nested_marker_version_constraint( + constraint: str, + expected: str, +) -> None: + assert ( + create_nested_marker("python_version", parse_version_constraint(constraint)) + == expected + ) + + +@pytest.mark.parametrize( + ["marker", "constraint"], + [ + # == + ('python_version == "3.6"', "~3.6"), + ('python_version == "3.6.*"', "==3.6.*"), + ('python_version == "3.6.* "', "==3.6.*"), + # != + ('python_version != "3.6"', "!=3.6.*"), + ('python_version != "3.6.*"', "!=3.6.*"), + 
('python_version != "3.6.* "', "!=3.6.*"), + # <, <=, >, >= precision 1 + ('python_version < "3"', "<3"), + ('python_version <= "3"', "<3"), + ('python_version > "3"', ">=3"), + ('python_version >= "3"', ">=3"), + # <, <=, >, >= precision 2 + ('python_version < "3.6"', "<3.6"), + ('python_version <= "3.6"', "<3.7"), + ('python_version > "3.6"', ">=3.7"), + ('python_version >= "3.6"', ">=3.6"), + # in, not in + ('python_version in "2.7, 3.6"', ">=2.7.0,<2.8.0 || >=3.6.0,<3.7.0"), + ('python_version in "2.7, 3.6.2"', ">=2.7.0,<2.8.0 || 3.6.2"), + ('python_version not in "2.7, 3.6"', "<2.7.0 || >=2.8.0,<3.6.0 || >=3.7.0"), + ('python_version not in "2.7, 3.6.2"', "<2.7.0 || >=2.8.0,<3.6.2 || >3.6.2"), + # and + ('python_version >= "3.6" and python_full_version < "4.0"', ">=3.6, <4.0"), + ( + 'python_full_version >= "3.6.1" and python_full_version < "4.0.0"', + ">=3.6.1, <4.0.0", + ), + # or + ('python_version < "3.6" or python_version >= "3.9"', "<3.6 || >=3.9"), + # and or + ( + ( + 'python_version >= "3.7" and python_version < "3.8" or python_version' + ' >= "3.9" and python_version < "3.10"' + ), + ">=3.7,<3.8 || >=3.9,<3.10", + ), + ( + ( + '(python_version < "2.7" or python_full_version >= "3.0.0") and' + ' python_full_version < "3.6.0"' + ), + "<2.7 || >=3.0,<3.6", + ), + # no python_version + ('sys_platform == "linux"', "*"), + # no relevant python_version + ('python_version >= "3.9" or sys_platform == "linux"', "*"), + # relevant python_version + ('python_version >= "3.9" and sys_platform == "linux"', ">=3.9"), + # exclude specific version + ( + 'python_version >= "3.5" and python_full_version != "3.7.6"', + ">=3.5,<3.7.6 || >3.7.6", + ), + # Full exact version + ( + 'python_full_version == "3.6.1"', + "3.6.1", + ), + ], +) +def test_get_python_constraint_from_marker(marker: str, constraint: str) -> None: + marker_parsed = parse_marker(marker) + constraint_parsed = parse_version_constraint(constraint) + assert get_python_constraint_from_marker(marker_parsed) == constraint_parsed + + +@pytest.mark.parametrize( + ("fixture", "result"), + [ + ("simple_project", True), + ("project_with_setup_cfg_only", True), + ("project_with_setup", True), + ("project_with_pep517_non_poetry", True), + ("project_without_pep517", False), + ("does_not_exist", False), + ], +) +def test_is_python_project(fixture: str, result: bool) -> None: + path = Path(__file__).parent.parent.parent / "fixtures" / fixture + assert is_python_project(path) == result diff --git a/tests/packages/utils/test_utils_link.py b/tests/packages/utils/test_utils_link.py new file mode 100644 index 0000000..b495875 --- /dev/null +++ b/tests/packages/utils/test_utils_link.py @@ -0,0 +1,145 @@ +from __future__ import annotations + +import uuid + +from hashlib import sha256 + +import pytest + +from poetry.core.packages.utils.link import Link + + +def make_checksum() -> str: + return sha256(str(uuid.uuid4()).encode()).hexdigest() + + +@pytest.fixture() +def file_checksum() -> str: + return make_checksum() + + +@pytest.fixture() +def metadata_checksum() -> str: + return make_checksum() + + +def make_url( + ext: str, file_checksum: str | None = None, metadata_checksum: str | None = None +) -> Link: + file_checksum = file_checksum or make_checksum() + return Link( + "https://files.pythonhosted.org/packages/16/52/dead/" + f"demo-1.0.0.{ext}#sha256={file_checksum}", + metadata=f"sha256={metadata_checksum}" if metadata_checksum else None, + ) + + +def test_package_link_hash(file_checksum: str) -> None: + link = make_url(ext="whl", 
file_checksum=file_checksum) + assert link.hash_name == "sha256" + assert link.hash == file_checksum + assert link.show_url == "demo-1.0.0.whl" + + # this is legacy PEP 503, no metadata hash is present + assert not link.has_metadata + assert not link.metadata_url + assert not link.metadata_hash + assert not link.metadata_hash_name + + +@pytest.mark.parametrize( + ("ext", "check"), + [ + ("whl", "wheel"), + ("egg", "egg"), + ("tar.gz", "sdist"), + ("zip", "sdist"), + ("cp36-cp36m-manylinux1_x86_64.whl", "wheel"), + ], +) +def test_package_link_is_checks(ext: str, check: str) -> None: + link = make_url(ext=ext) + assert getattr(link, f"is_{check}") + + +@pytest.mark.parametrize( + ("ext", "has_metadata"), + [("whl", True), ("egg", False), ("tar.gz", True), ("zip", True)], +) +def test_package_link_pep658( + ext: str, has_metadata: bool, metadata_checksum: str +) -> None: + link = make_url(ext=ext, metadata_checksum=metadata_checksum) + + if has_metadata: + assert link.has_metadata + assert link.metadata_url == f"{link.url_without_fragment}.metadata" + assert link.metadata_hash == metadata_checksum + assert link.metadata_hash_name == "sha256" + else: + assert not link.has_metadata + assert not link.metadata_url + assert not link.metadata_hash + assert not link.metadata_hash_name + + +def test_package_link_pep658_no_default_metadata() -> None: + link = make_url(ext="whl") + + assert not link.has_metadata + assert not link.metadata_url + assert not link.metadata_hash + assert not link.metadata_hash_name + + +@pytest.mark.parametrize( + ("metadata", "has_metadata"), + [ + ("true", True), + ("false", False), + ("", False), + ], +) +def test_package_link_pep653_non_hash_metadata_value( + file_checksum: str, metadata: str | bool, has_metadata: bool +) -> None: + link = Link( + "https://files.pythonhosted.org/packages/16/52/dead/" + f"demo-1.0.0.whl#sha256={file_checksum}", + metadata=metadata, + ) + + if has_metadata: + assert link.has_metadata + assert link.metadata_url == f"{link.url_without_fragment}.metadata" + else: + assert not link.has_metadata + assert not link.metadata_url + + assert not link.metadata_hash + assert not link.metadata_hash_name + + +def test_package_link_pep592_default_not_yanked() -> None: + link = make_url(ext="whl") + + assert not link.yanked + assert link.yanked_reason == "" + + +@pytest.mark.parametrize( + ("yanked", "expected_yanked", "expected_yanked_reason"), + [ + (True, True, ""), + (False, False, ""), + ("the reason", True, "the reason"), + ("", True, ""), + ], +) +def test_package_link_pep592_yanked( + yanked: str | bool, expected_yanked: bool, expected_yanked_reason: str +) -> None: + link = Link("https://example.org", yanked=yanked) + + assert link.yanked == expected_yanked + assert link.yanked_reason == expected_yanked_reason diff --git a/tests/packages/utils/test_utils_urls.py b/tests/packages/utils/test_utils_urls.py new file mode 100644 index 0000000..6480c78 --- /dev/null +++ b/tests/packages/utils/test_utils_urls.py @@ -0,0 +1,64 @@ +# These test scenarios are ported over from pypa/pip +# https://raw.githubusercontent.com/pypa/pip/b447f438df08303f4f07f2598f190e73876443ba/tests/unit/test_urls.py + +from __future__ import annotations + +import sys + +from pathlib import Path + +import pytest + +from poetry.core.packages.utils.utils import path_to_url +from poetry.core.packages.utils.utils import url_to_path + + +@pytest.mark.skipif("sys.platform == 'win32'") +def test_path_to_url_unix() -> None: + assert path_to_url("/tmp/file") == "file:///tmp/file" + path 
= Path(".") / "file" + assert path_to_url("file") == "file://" + path.absolute().as_posix() + + +@pytest.mark.skipif("sys.platform != 'win32'") +def test_path_to_url_win() -> None: + assert path_to_url("c:/tmp/file") == "file:///c:/tmp/file" + assert path_to_url("c:\\tmp\\file") == "file:///c:/tmp/file" + assert path_to_url(r"\\unc\as\path") == "file://unc/as/path" + path = Path(".") / "file" + assert path_to_url("file") == "file:///" + path.absolute().as_posix() + + +@pytest.mark.parametrize( + "url,win_expected,non_win_expected", + [ + ("file:tmp", "tmp", "tmp"), + ("file:c:/path/to/file", r"C:\path\to\file", "c:/path/to/file"), + ("file:/path/to/file", r"\path\to\file", "/path/to/file"), + ("file://localhost/tmp/file", r"\tmp\file", "/tmp/file"), + ("file://localhost/c:/tmp/file", r"C:\tmp\file", "/c:/tmp/file"), + ("file://somehost/tmp/file", r"\\somehost\tmp\file", None), + ("file:///tmp/file", r"\tmp\file", "/tmp/file"), + ("file:///c:/tmp/file", r"C:\tmp\file", "/c:/tmp/file"), + ], +) +def test_url_to_path(url: str, win_expected: str, non_win_expected: str | None) -> None: + if sys.platform == "win32": + expected_path = win_expected + else: + expected_path = non_win_expected + + if expected_path is None: + with pytest.raises(ValueError): + url_to_path(url) + else: + assert url_to_path(url) == Path(expected_path) + + +@pytest.mark.skipif("sys.platform != 'win32'") +def test_url_to_path_path_to_url_symmetry_win() -> None: + path = r"C:\tmp\file" + assert url_to_path(path_to_url(path)) == Path(path) + + unc_path = r"\\unc\share\path" + assert url_to_path(path_to_url(unc_path)) == Path(unc_path) diff --git a/tests/pyproject/__init__.py b/tests/pyproject/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/pyproject/conftest.py b/tests/pyproject/conftest.py new file mode 100644 index 0000000..82ff219 --- /dev/null +++ b/tests/pyproject/conftest.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest + + +if TYPE_CHECKING: + from pathlib import Path + + +@pytest.fixture +def pyproject_toml(tmp_path: Path) -> Path: + path = tmp_path / "pyproject.toml" + with path.open(mode="w"): + pass + return path + + +@pytest.fixture +def build_system_section(pyproject_toml: Path) -> str: + content = """ +[build-system] +requires = ["poetry-core"] +build-backend = "poetry.core.masonry.api" +""" + with pyproject_toml.open(mode="a") as f: + f.write(content) + return content + + +@pytest.fixture +def poetry_section(pyproject_toml: Path) -> str: + content = """ +[tool.poetry] +name = "poetry" + +[tool.poetry.dependencies] +python = "^3.5" +""" + with pyproject_toml.open(mode="a") as f: + f.write(content) + return content diff --git a/tests/pyproject/test_pyproject_toml.py b/tests/pyproject/test_pyproject_toml.py new file mode 100644 index 0000000..dc0927e --- /dev/null +++ b/tests/pyproject/test_pyproject_toml.py @@ -0,0 +1,113 @@ +from __future__ import annotations + +import uuid + +from pathlib import Path +from typing import Any + +import pytest + +from tomlkit.toml_document import TOMLDocument +from tomlkit.toml_file import TOMLFile + +from poetry.core.pyproject.exceptions import PyProjectException +from poetry.core.pyproject.toml import PyProjectTOML + + +def test_pyproject_toml_simple( + pyproject_toml: Path, build_system_section: str, poetry_section: str +) -> None: + data = TOMLFile(pyproject_toml.as_posix()).read() + assert PyProjectTOML(pyproject_toml).data == data + + +def 
test_pyproject_toml_no_poetry_config(pyproject_toml: Path) -> None: + pyproject = PyProjectTOML(pyproject_toml) + + assert not pyproject.is_poetry_project() + + with pytest.raises(PyProjectException) as excval: + _ = pyproject.poetry_config + + assert f"[tool.poetry] section not found in {pyproject_toml.as_posix()}" in str( + excval.value + ) + + +def test_pyproject_toml_poetry_config( + pyproject_toml: Path, poetry_section: str +) -> None: + pyproject = PyProjectTOML(pyproject_toml) + doc: dict[str, Any] = TOMLFile(pyproject_toml.as_posix()).read() + config = doc["tool"]["poetry"] + + assert pyproject.is_poetry_project() + assert pyproject.poetry_config == config + + +def test_pyproject_toml_no_build_system_defaults() -> None: + pyproject_toml = ( + Path(__file__).parent.parent + / "fixtures" + / "project_with_build_system_requires" + / "pyproject.toml" + ) + + build_system = PyProjectTOML(pyproject_toml).build_system + assert build_system.requires == ["poetry-core", "Cython~=0.29.6"] + + assert len(build_system.dependencies) == 2 + assert build_system.dependencies[0].to_pep_508() == "poetry-core" + assert build_system.dependencies[1].to_pep_508() == "Cython (>=0.29.6,<0.30.0)" + + +def test_pyproject_toml_build_requires_as_dependencies(pyproject_toml: Path) -> None: + build_system = PyProjectTOML(pyproject_toml).build_system + assert build_system.requires == ["setuptools", "wheel"] + assert build_system.build_backend == "setuptools.build_meta:__legacy__" + + +def test_pyproject_toml_non_existent(pyproject_toml: Path) -> None: + pyproject_toml.unlink() + pyproject = PyProjectTOML(pyproject_toml) + build_system = pyproject.build_system + + assert pyproject.data == TOMLDocument() + assert build_system.requires == ["poetry-core"] + assert build_system.build_backend == "poetry.core.masonry.api" + + +def test_pyproject_toml_reload(pyproject_toml: Path, poetry_section: str) -> None: + pyproject = PyProjectTOML(pyproject_toml) + name_original = pyproject.poetry_config["name"] + name_new = str(uuid.uuid4()) + + pyproject.poetry_config["name"] = name_new + assert isinstance(pyproject.poetry_config["name"], str) + assert pyproject.poetry_config["name"] == name_new + + pyproject.reload() + assert pyproject.poetry_config["name"] == name_original + + +def test_pyproject_toml_save( + pyproject_toml: Path, poetry_section: str, build_system_section: str +) -> None: + pyproject = PyProjectTOML(pyproject_toml) + + name = str(uuid.uuid4()) + build_backend = str(uuid.uuid4()) + build_requires = str(uuid.uuid4()) + + pyproject.poetry_config["name"] = name + pyproject.build_system.build_backend = build_backend + pyproject.build_system.requires.append(build_requires) + + pyproject.save() + + pyproject = PyProjectTOML(pyproject_toml) + + assert isinstance(pyproject.poetry_config["name"], str) + assert pyproject.poetry_config["name"] == name + assert pyproject.build_system.build_backend == build_backend + assert build_requires in pyproject.build_system.requires diff --git a/tests/pyproject/test_pyproject_toml_file.py b/tests/pyproject/test_pyproject_toml_file.py new file mode 100644 index 0000000..95fd20f --- /dev/null +++ b/tests/pyproject/test_pyproject_toml_file.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import pytest + +from poetry.core.exceptions import PoetryCoreException +from poetry.core.toml import TOMLFile + + +if TYPE_CHECKING: + from pathlib import Path + + +def test_old_pyproject_toml_file_deprecation( + pyproject_toml: Path, build_system_section: 
str, poetry_section: str +) -> None: + from poetry.core.utils.toml_file import TomlFile + + with pytest.warns(DeprecationWarning): + file = TomlFile(pyproject_toml) + + data = file.read() + assert data == TOMLFile(pyproject_toml).read() + + +def test_pyproject_toml_file_invalid(pyproject_toml: Path) -> None: + with pyproject_toml.open(mode="a") as f: + f.write("<<<<<<<<<<<") + + with pytest.raises(PoetryCoreException) as excval: + _ = TOMLFile(pyproject_toml).read() + + assert f"Invalid TOML file {pyproject_toml.as_posix()}" in str(excval.value) + + +def test_pyproject_toml_file_getattr(tmp_path: Path, pyproject_toml: Path) -> None: + file = TOMLFile(pyproject_toml) + assert file.parent == tmp_path diff --git a/tests/spdx/__init__.py b/tests/spdx/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/spdx/test_helpers.py b/tests/spdx/test_helpers.py new file mode 100644 index 0000000..add838d --- /dev/null +++ b/tests/spdx/test_helpers.py @@ -0,0 +1,54 @@ +from __future__ import annotations + +import pytest + +from poetry.core.spdx.helpers import license_by_id + + +def test_license_by_id() -> None: + license = license_by_id("MIT") + + assert license.id == "MIT" + assert license.name == "MIT License" + assert license.is_osi_approved + assert not license.is_deprecated + + license = license_by_id("LGPL-3.0-or-later") + + assert license.id == "LGPL-3.0-or-later" + assert license.name == "GNU Lesser General Public License v3.0 or later" + assert license.is_osi_approved + assert not license.is_deprecated + + +def test_license_by_id_is_case_insensitive() -> None: + license = license_by_id("mit") + + assert license.id == "MIT" + + license = license_by_id("miT") + + assert license.id == "MIT" + + +def test_license_by_id_with_full_name() -> None: + license = license_by_id("GNU Lesser General Public License v3.0 or later") + + assert license.id == "LGPL-3.0-or-later" + assert license.name == "GNU Lesser General Public License v3.0 or later" + assert license.is_osi_approved + assert not license.is_deprecated + + +def test_license_by_id_invalid() -> None: + with pytest.raises(ValueError): + license_by_id("") + + +def test_license_by_id_custom() -> None: + license = license_by_id("Custom") + + assert license.id == "Custom" + assert license.name == "Custom" + assert not license.is_osi_approved + assert not license.is_deprecated diff --git a/tests/spdx/test_license.py b/tests/spdx/test_license.py new file mode 100644 index 0000000..6cada07 --- /dev/null +++ b/tests/spdx/test_license.py @@ -0,0 +1,59 @@ +from __future__ import annotations + +from poetry.core.spdx.helpers import license_by_id + + +def test_classifier_name() -> None: + license = license_by_id("lgpl-3.0-or-later") + + assert ( + license.classifier_name + == "GNU Lesser General Public License v3 or later (LGPLv3+)" + ) + + +def test_classifier_name_no_classifer_osi_approved() -> None: + license = license_by_id("LiLiQ-R-1.1") + + assert license.classifier_name is None + + +def test_classifier_name_no_classifer() -> None: + license = license_by_id("Leptonica") + + assert license.classifier_name == "Other/Proprietary License" + + +def test_classifier() -> None: + license = license_by_id("lgpl-3.0-or-later") + + assert ( + license.classifier + == "License :: " + "OSI Approved :: " + "GNU Lesser General Public License v3 or later (LGPLv3+)" + ) + + +def test_classifier_no_classifer_osi_approved() -> None: + license = license_by_id("LiLiQ-R-1.1") + + assert license.classifier == "License :: OSI Approved" + + +def 
test_classifier_no_classifer() -> None: + license = license_by_id("Leptonica") + + assert license.classifier == "License :: Other/Proprietary License" + + +def test_proprietary_license() -> None: + license = license_by_id("Proprietary") + + assert license.classifier == "License :: Other/Proprietary License" + + +def test_custom_license() -> None: + license = license_by_id("Amazon Software License") + + assert license.classifier == "License :: Other/Proprietary License" diff --git a/tests/test_core_version.py b/tests/test_core_version.py new file mode 100644 index 0000000..601ef49 --- /dev/null +++ b/tests/test_core_version.py @@ -0,0 +1,11 @@ +from __future__ import annotations + +from pathlib import Path + +from poetry.core import __version__ +from poetry.core.pyproject.toml import PyProjectTOML + + +def test_version_is_synced() -> None: + pyproject = PyProjectTOML(Path(__file__).parent.parent.joinpath("pyproject.toml")) + assert __version__ == pyproject.poetry_config.get("version") diff --git a/tests/test_factory.py b/tests/test_factory.py new file mode 100644 index 0000000..af4101d --- /dev/null +++ b/tests/test_factory.py @@ -0,0 +1,411 @@ +from __future__ import annotations + +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Any +from typing import cast + +import pytest + +from packaging.utils import canonicalize_name + +from poetry.core.constraints.version import parse_constraint +from poetry.core.factory import Factory +from poetry.core.packages.url_dependency import URLDependency +from poetry.core.toml import TOMLFile +from poetry.core.version.markers import SingleMarker + + +if TYPE_CHECKING: + from poetry.core.packages.dependency import Dependency + from poetry.core.packages.vcs_dependency import VCSDependency + + +fixtures_dir = Path(__file__).parent / "fixtures" + + +def test_create_poetry() -> None: + poetry = Factory().create_poetry(fixtures_dir / "sample_project") + + package = poetry.package + + assert package.name == "my-package" + assert package.version.text == "1.2.3" + assert package.description == "Some description." 
+ assert package.authors == ["Sébastien Eustace "] + assert package.license + assert package.license.id == "MIT" + assert ( + package.readmes[0].relative_to(fixtures_dir).as_posix() + == "sample_project/README.rst" + ) + assert package.homepage == "https://python-poetry.org" + assert package.repository_url == "https://github.com/python-poetry/poetry" + assert package.keywords == ["packaging", "dependency", "poetry"] + + assert package.python_versions == "~2.7 || ^3.6" + assert str(package.python_constraint) == ">=2.7,<2.8 || >=3.6,<4.0" + + dependencies: dict[str, Dependency] = {} + for dep in package.requires: + dependencies[dep.name] = dep + + cleo = dependencies["cleo"] + assert cleo.pretty_constraint == "^0.6" + assert not cleo.is_optional() + + pendulum = dependencies["pendulum"] + assert pendulum.pretty_constraint == "branch 2.0" + assert pendulum.is_vcs() + pendulum = cast("VCSDependency", pendulum) + assert pendulum.vcs == "git" + assert pendulum.branch == "2.0" + assert pendulum.source == "https://github.com/sdispater/pendulum.git" + assert pendulum.allows_prereleases() + assert not pendulum.develop + + tomlkit = dependencies["tomlkit"] + assert tomlkit.pretty_constraint == "rev 3bff550" + assert tomlkit.is_vcs() + tomlkit = cast("VCSDependency", tomlkit) + assert tomlkit.vcs == "git" + assert tomlkit.rev == "3bff550" + assert tomlkit.source == "https://github.com/sdispater/tomlkit.git" + assert tomlkit.allows_prereleases() + assert not tomlkit.develop + + requests = dependencies["requests"] + assert requests.pretty_constraint == "^2.18" + assert not requests.is_vcs() + assert not requests.allows_prereleases() + assert requests.is_optional() + assert requests.extras == frozenset({"security"}) + + pathlib2 = dependencies["pathlib2"] + assert pathlib2.pretty_constraint == "^2.2" + assert pathlib2.python_versions == ">=2.7 <2.8" + assert not pathlib2.is_optional() + + demo = dependencies["demo"] + assert demo.is_file() + assert not demo.is_vcs() + assert demo.name == "demo" + assert demo.pretty_constraint == "*" + + demo = dependencies["my-package"] + assert not demo.is_file() + assert demo.is_directory() + assert not demo.is_vcs() + assert demo.name == "my-package" + assert demo.pretty_constraint == "*" + + simple_project = dependencies["simple-project"] + assert not simple_project.is_file() + assert simple_project.is_directory() + assert not simple_project.is_vcs() + assert simple_project.name == "simple-project" + assert simple_project.pretty_constraint == "*" + + functools32 = dependencies["functools32"] + assert functools32.name == "functools32" + assert functools32.pretty_constraint == "^3.2.3" + assert ( + str(functools32.marker) + == 'python_version ~= "2.7" and sys_platform == "win32" or python_version in' + ' "3.4 3.5"' + ) + + dataclasses = dependencies["dataclasses"] + assert dataclasses.name == "dataclasses" + assert dataclasses.pretty_constraint == "^0.7" + assert dataclasses.python_versions == ">=3.6.1 <3.7" + assert ( + str(dataclasses.marker) + == 'python_full_version >= "3.6.1" and python_version < "3.7"' + ) + + assert "db" in package.extras + + classifiers = package.classifiers + + assert classifiers == [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules", + ] + + assert package.all_classifiers == [ + "License :: OSI Approved :: MIT License", + "Programming Language :: Python :: 2", + "Programming Language :: Python :: 2.7", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 
3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules", + ] + + +def test_create_poetry_with_packages_and_includes() -> None: + poetry = Factory().create_poetry( + fixtures_dir.parent / "masonry" / "builders" / "fixtures" / "with-include" + ) + + package = poetry.package + + assert package.packages == [ + {"include": "extra_dir/**/*.py"}, + {"include": "extra_dir/**/*.py"}, + {"include": "my_module.py"}, + {"include": "package_with_include"}, + {"include": "tests", "format": "sdist"}, + {"include": "for_wheel_only", "format": ["wheel"]}, + {"include": "src_package", "from": "src"}, + ] + + assert package.include == [ + {"path": "extra_dir/vcs_excluded.txt", "format": []}, + {"path": "notes.txt", "format": []}, + ] + + +def test_create_poetry_with_multi_constraints_dependency() -> None: + poetry = Factory().create_poetry( + fixtures_dir / "project_with_multi_constraints_dependency" + ) + + package = poetry.package + + assert len(package.requires) == 2 + + +def test_validate() -> None: + complete = TOMLFile(fixtures_dir / "complete.toml") + doc: dict[str, Any] = complete.read() + content = doc["tool"]["poetry"] + + assert Factory.validate(content) == {"errors": [], "warnings": []} + + +def test_validate_fails() -> None: + complete = TOMLFile(fixtures_dir / "complete.toml") + doc: dict[str, Any] = complete.read() + content = doc["tool"]["poetry"] + content["authors"] = "this is not a valid array" + + expected = "[authors] 'this is not a valid array' is not of type 'array'" + + assert Factory.validate(content) == {"errors": [expected], "warnings": []} + + +def test_validate_without_strict_fails_only_non_strict() -> None: + project_failing_strict_validation = TOMLFile( + fixtures_dir / "project_failing_strict_validation" / "pyproject.toml" + ) + doc: dict[str, Any] = project_failing_strict_validation.read() + content = doc["tool"]["poetry"] + + assert Factory.validate(content) == { + "errors": [ + "'name' is a required property", + "'version' is a required property", + "'description' is a required property", + "'authors' is a required property", + ], + "warnings": [], + } + + +def test_validate_strict_fails_strict_and_non_strict() -> None: + project_failing_strict_validation = TOMLFile( + fixtures_dir / "project_failing_strict_validation" / "pyproject.toml" + ) + doc: dict[str, Any] = project_failing_strict_validation.read() + content = doc["tool"]["poetry"] + + assert Factory.validate(content, strict=True) == { + "errors": [ + "'name' is a required property", + "'version' is a required property", + "'description' is a required property", + "'authors' is a required property", + ( + 'Script "a_script_with_unknown_extra" requires extra "foo" which is not' + " defined." + ), + ( + "Declared README files must be of same type: found text/markdown," + " text/x-rst" + ), + ], + "warnings": [ + ( + "A wildcard Python dependency is ambiguous. Consider specifying a more" + " explicit one." + ), + ( + 'The "pathlib2" dependency specifies the "allows-prereleases" property,' + ' which is deprecated. Use "allow-prereleases" instead.' 
+ ), + ], + } + + +def test_strict_validation_success_on_multiple_readme_files() -> None: + with_readme_files = TOMLFile(fixtures_dir / "with_readme_files" / "pyproject.toml") + doc: dict[str, Any] = with_readme_files.read() + content = doc["tool"]["poetry"] + + assert Factory.validate(content, strict=True) == {"errors": [], "warnings": []} + + +def test_strict_validation_fails_on_readme_files_with_unmatching_types() -> None: + with_readme_files = TOMLFile(fixtures_dir / "with_readme_files" / "pyproject.toml") + doc: dict[str, Any] = with_readme_files.read() + content = doc["tool"]["poetry"] + content["readme"][0] = "README.md" + + assert Factory.validate(content, strict=True) == { + "errors": [ + "Declared README files must be of same type: found text/markdown," + " text/x-rst" + ], + "warnings": [], + } + + +def test_create_poetry_fails_on_invalid_configuration() -> None: + with pytest.raises(RuntimeError) as e: + Factory().create_poetry( + Path(__file__).parent / "fixtures" / "invalid_pyproject" / "pyproject.toml" + ) + + expected = """\ +The Poetry configuration is invalid: + - 'description' is a required property +""" + assert str(e.value) == expected + + +def test_create_poetry_omits_dev_dependencies_iff_with_dev_is_false() -> None: + poetry = Factory().create_poetry(fixtures_dir / "sample_project", with_groups=False) + assert not any("dev" in r.groups for r in poetry.package.all_requires) + + poetry = Factory().create_poetry(fixtures_dir / "sample_project") + assert any("dev" in r.groups for r in poetry.package.all_requires) + + +def test_create_poetry_fails_with_invalid_dev_dependencies_iff_with_dev_is_true() -> ( + None +): + with pytest.raises(ValueError) as err: + Factory().create_poetry(fixtures_dir / "project_with_invalid_dev_deps") + assert "does not exist" in str(err.value) + + Factory().create_poetry( + fixtures_dir / "project_with_invalid_dev_deps", with_groups=False + ) + + +def test_create_poetry_with_groups_and_legacy_dev() -> None: + poetry = Factory().create_poetry( + fixtures_dir / "project_with_groups_and_legacy_dev" + ) + + package = poetry.package + dependencies = package.all_requires + + assert len(dependencies) == 2 + assert {dependency.name for dependency in dependencies} == {"pytest", "pre-commit"} + + +def test_create_poetry_with_groups_and_explicit_main() -> None: + poetry = Factory().create_poetry( + fixtures_dir / "project_with_groups_and_explicit_main" + ) + + package = poetry.package + dependencies = package.requires + + assert len(dependencies) == 1 + assert {dependency.name for dependency in dependencies} == { + "aiohttp", + } + + +def test_create_poetry_with_markers_and_extras() -> None: + poetry = Factory().create_poetry(fixtures_dir / "project_with_markers_and_extras") + + package = poetry.package + dependencies = package.requires + extras = package.extras + + assert len(dependencies) == 2 + assert {dependency.name for dependency in dependencies} == {"orjson"} + assert set(extras[canonicalize_name("all")]) == set(dependencies) + for dependency in dependencies: + assert dependency.in_extras == ["all"] + assert isinstance(dependency, URLDependency) + assert isinstance(dependency.marker, SingleMarker) + assert dependency.marker.name == "sys_platform" + assert dependency.marker.value == ( + "darwin" if "macosx" in dependency.url else "linux" + ) + + +@pytest.mark.parametrize( + "constraint, exp_python, exp_marker", + [ + ({"python": "3.7"}, "~3.7", 'python_version == "3.7"'), + ({"platform": "linux"}, "*", 'sys_platform == "linux"'), + ({"markers": 
'python_version == "3.7"'}, "~3.7", 'python_version == "3.7"'), + ( + {"markers": 'platform_machine == "x86_64"'}, + "*", + 'platform_machine == "x86_64"', + ), + ( + {"python": "3.7", "markers": 'platform_machine == "x86_64"'}, + "~3.7", + 'platform_machine == "x86_64" and python_version == "3.7"', + ), + ( + {"platform": "linux", "markers": 'platform_machine == "x86_64"'}, + "*", + 'platform_machine == "x86_64" and sys_platform == "linux"', + ), + ( + { + "python": "3.7", + "platform": "linux", + "markers": 'platform_machine == "x86_64"', + }, + "~3.7", + ( + 'platform_machine == "x86_64" and python_version == "3.7" and' + ' sys_platform == "linux"' + ), + ), + ( + {"python": ">=3.7", "markers": 'python_version < "4.0"'}, + "<4.0 >=3.7", + 'python_version < "4.0" and python_version >= "3.7"', + ), + ( + {"platform": "linux", "markers": 'sys_platform == "win32"'}, + "*", + "", + ), + ], +) +def test_create_dependency_marker_variants( + constraint: dict[str, Any], exp_python: str, exp_marker: str +) -> None: + constraint["version"] = "1.0.0" + dep = Factory.create_dependency("foo", constraint) + assert dep.python_versions == exp_python + assert dep.python_constraint == parse_constraint(exp_python) + assert str(dep.marker) == exp_marker diff --git a/tests/testutils.py b/tests/testutils.py new file mode 100644 index 0000000..d43b4ab --- /dev/null +++ b/tests/testutils.py @@ -0,0 +1,82 @@ +from __future__ import annotations + +import shutil +import subprocess +import tarfile +import tempfile +import zipfile + +from contextlib import contextmanager +from pathlib import Path +from typing import Any +from typing import Generator + +from poetry.core.toml import TOMLFile + + +__toml_build_backend_patch__ = { + "build-system": { + "requires": [str(Path(__file__).parent.parent)], + "build-backend": "poetry.core.masonry.api", + } +} + + +@contextmanager +def temporary_project_directory( + path: Path, toml_patch: dict[str, Any] | None = None +) -> Generator[str, None, None]: + """ + Context manager that takes a project source directory, copies content to a temporary + directory, patches the `pyproject.toml` using the provided patch, or using the default + patch if one is not given. The default path replaces `build-system` section in order + to use the working copy of poetry-core as the backend. + + Once the context, exists, the temporary directory is cleaned up. + + :param path: Source project root directory to copy from. + :param toml_patch: Patch to use for the pyproject.toml, defaults to build system patching. + :return: A temporary copy + """ + assert (path / "pyproject.toml").exists() + + with tempfile.TemporaryDirectory(prefix="poetry-core-pep517") as tmp: + dst = Path(tmp) / path.name + shutil.copytree(str(path), dst) + toml = TOMLFile(str(dst / "pyproject.toml")) + data = toml.read() + data.update(toml_patch or __toml_build_backend_patch__) + toml.write(data) + yield str(dst) + + +def subprocess_run(*args: str, **kwargs: Any) -> subprocess.CompletedProcess[str]: + """ + Helper method to run a subprocess. Asserts for success. 
+ """ + result = subprocess.run(args, text=True, capture_output=True, **kwargs) + assert result.returncode == 0 + return result + + +def validate_wheel_contents( + name: str, version: str, path: str, files: list[str] | None = None +) -> None: + dist_info = f"{name}-{version}.dist-info" + files = files or [] + + with zipfile.ZipFile(path) as z: + namelist = z.namelist() + # we use concatenation here for PY2 compat + for filename in ["WHEEL", "METADATA", "RECORD"] + files: + assert f"{dist_info}/{filename}" in namelist + + +def validate_sdist_contents( + name: str, version: str, path: str, files: list[str] +) -> None: + escaped_name = name.replace("-", "_") + with tarfile.open(path) as tar: + namelist = tar.getnames() + for filename in files: + assert f"{escaped_name}-{version}/{filename}" in namelist diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/utils/test_helpers.py b/tests/utils/test_helpers.py new file mode 100644 index 0000000..f8fe439 --- /dev/null +++ b/tests/utils/test_helpers.py @@ -0,0 +1,120 @@ +from __future__ import annotations + +import os + +from pathlib import Path +from stat import S_IREAD + +import pytest + +from poetry.core.utils.helpers import combine_unicode +from poetry.core.utils.helpers import parse_requires +from poetry.core.utils.helpers import readme_content_type +from poetry.core.utils.helpers import temporary_directory + + +def test_parse_requires() -> None: + requires = """\ +jsonschema>=2.6.0.0,<3.0.0.0 +lockfile>=0.12.0.0,<0.13.0.0 +pip-tools>=1.11.0.0,<2.0.0.0 +pkginfo>=1.4.0.0,<2.0.0.0 +pyrsistent>=0.14.2.0,<0.15.0.0 +toml>=0.9.0.0,<0.10.0.0 +cleo>=0.6.0.0,<0.7.0.0 +cachy>=0.1.1.0,<0.2.0.0 +cachecontrol>=0.12.4.0,<0.13.0.0 +requests>=2.18.0.0,<3.0.0.0 +msgpack-python>=0.5.0.0,<0.6.0.0 +pyparsing>=2.2.0.0,<3.0.0.0 +requests-toolbelt>=0.8.0.0,<0.9.0.0 + +[:(python_version >= "2.7.0.0" and python_version < "2.8.0.0") or (python_version >= "3.4.0.0" and python_version < "3.5.0.0")] +typing>=3.6.0.0,<4.0.0.0 + +[:python_version >= "2.7.0.0" and python_version < "2.8.0.0"] +virtualenv>=15.2.0.0,<16.0.0.0 +pathlib2>=2.3.0.0,<3.0.0.0 + +[:python_version >= "3.4.0.0" and python_version < "3.6.0.0"] +zipfile36>=0.1.0.0,<0.2.0.0 + +[dev] +isort@ git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort +""" + result = parse_requires(requires) + expected = [ + "jsonschema>=2.6.0.0,<3.0.0.0", + "lockfile>=0.12.0.0,<0.13.0.0", + "pip-tools>=1.11.0.0,<2.0.0.0", + "pkginfo>=1.4.0.0,<2.0.0.0", + "pyrsistent>=0.14.2.0,<0.15.0.0", + "toml>=0.9.0.0,<0.10.0.0", + "cleo>=0.6.0.0,<0.7.0.0", + "cachy>=0.1.1.0,<0.2.0.0", + "cachecontrol>=0.12.4.0,<0.13.0.0", + "requests>=2.18.0.0,<3.0.0.0", + "msgpack-python>=0.5.0.0,<0.6.0.0", + "pyparsing>=2.2.0.0,<3.0.0.0", + "requests-toolbelt>=0.8.0.0,<0.9.0.0", + ( + 'typing>=3.6.0.0,<4.0.0.0 ; (python_version >= "2.7.0.0" and python_version' + ' < "2.8.0.0") or (python_version >= "3.4.0.0" and python_version <' + ' "3.5.0.0")' + ), + ( + 'virtualenv>=15.2.0.0,<16.0.0.0 ; python_version >= "2.7.0.0" and' + ' python_version < "2.8.0.0"' + ), + ( + 'pathlib2>=2.3.0.0,<3.0.0.0 ; python_version >= "2.7.0.0" and' + ' python_version < "2.8.0.0"' + ), + ( + 'zipfile36>=0.1.0.0,<0.2.0.0 ; python_version >= "3.4.0.0" and' + ' python_version < "3.6.0.0"' + ), + ( + "isort@" + " git+git://github.com/timothycrosley/isort.git@e63ae06ec7d70b06df9e528357650281a3d3ec22#egg=isort" + ' ; extra == "dev"' + ), + ] + assert result == expected + + 
+def test_utils_helpers_combine_unicode() -> None: + combined_expected = "é" + decomposed = "é" + assert combined_expected != decomposed + + combined = combine_unicode(decomposed) + assert combined == combined_expected + + +def test_utils_helpers_temporary_directory_readonly_file() -> None: + with temporary_directory() as temp_dir: + readonly_filename = os.path.join(temp_dir, "file.txt") + with open(readonly_filename, "w+") as readonly_file: + readonly_file.write("Poetry rocks!") + os.chmod(str(readonly_filename), S_IREAD) + + assert not os.path.exists(temp_dir) + assert not os.path.exists(readonly_filename) + + +@pytest.mark.parametrize( + "readme, content_type", + [ + ("README.rst", "text/x-rst"), + ("README.md", "text/markdown"), + ("README", "text/plain"), + (Path("README.rst"), "text/x-rst"), + (Path("README.md"), "text/markdown"), + (Path("README"), "text/plain"), + ], +) +def test_utils_helpers_readme_content_type( + readme: str | Path, content_type: str +) -> None: + assert readme_content_type(readme) == content_type diff --git a/tests/vcs/__init__.py b/tests/vcs/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/vcs/test_vcs.py b/tests/vcs/test_vcs.py new file mode 100644 index 0000000..67fff1d --- /dev/null +++ b/tests/vcs/test_vcs.py @@ -0,0 +1,475 @@ +from __future__ import annotations + +import subprocess + +from pathlib import Path +from typing import TYPE_CHECKING +from typing import Any + +import pytest + +from poetry.core.utils._compat import WINDOWS +from poetry.core.vcs.git import Git +from poetry.core.vcs.git import GitError +from poetry.core.vcs.git import GitUrl +from poetry.core.vcs.git import ParsedUrl +from poetry.core.vcs.git import _reset_executable + + +if TYPE_CHECKING: + from pytest_mock import MockerFixture + + +@pytest.mark.parametrize( + "url, normalized", + [ + ( + "git+ssh://user@hostname:project.git#commit", + GitUrl("user@hostname:project.git", "commit", None), + ), + ( + "git+http://user@hostname/project/blah.git@commit", + GitUrl("http://user@hostname/project/blah.git", "commit", None), + ), + ( + "git+https://user@hostname/project/blah.git", + GitUrl("https://user@hostname/project/blah.git", None, None), + ), + ( + "git+https://user@hostname/project~_-.foo/blah~_-.bar.git", + GitUrl("https://user@hostname/project~_-.foo/blah~_-.bar.git", None, None), + ), + ( + "git+https://user@hostname:project/blah.git", + GitUrl("https://user@hostname/project/blah.git", None, None), + ), + ( + "git+ssh://git@github.com:sdispater/poetry.git#v1.0.27", + GitUrl("git@github.com:sdispater/poetry.git", "v1.0.27", None), + ), + ( + "git+ssh://git@github.com:/sdispater/poetry.git", + GitUrl("git@github.com:/sdispater/poetry.git", None, None), + ), + ( + "git+ssh://git@github.com:org/repo", + GitUrl("git@github.com:org/repo", None, None), + ), + ( + "git+ssh://git@github.com/org/repo", + GitUrl("ssh://git@github.com/org/repo", None, None), + ), + ("git+ssh://foo:22/some/path", GitUrl("ssh://foo:22/some/path", None, None)), + ("git@github.com:org/repo", GitUrl("git@github.com:org/repo", None, None)), + ( + "git+https://github.com/sdispater/pendulum", + GitUrl("https://github.com/sdispater/pendulum", None, None), + ), + ( + "git+https://github.com/sdispater/pendulum#7a018f2d075b03a73409e8356f9b29c9ad4ea2c5", + GitUrl( + "https://github.com/sdispater/pendulum", + "7a018f2d075b03a73409e8356f9b29c9ad4ea2c5", + None, + ), + ), + ( + "git+ssh://git@git.example.com:b/b.git#v1.0.0", + GitUrl("git@git.example.com:b/b.git", "v1.0.0", None), + ), + ( + 
"git+ssh://git@github.com:sdispater/pendulum.git#foo/bar", + GitUrl("git@github.com:sdispater/pendulum.git", "foo/bar", None), + ), + ("git+file:///foo/bar.git", GitUrl("file:///foo/bar.git", None, None)), + ( + "git+file://C:\\Users\\hello\\testing.git#zkat/windows-files", + GitUrl("file://C:\\Users\\hello\\testing.git", "zkat/windows-files", None), + ), + # hidden directories on Windows use $ in their path + # python-poetry/poetry#5493 + ( + "git+file://C:\\Users\\hello$\\testing.git#zkat/windows-files", + GitUrl("file://C:\\Users\\hello$\\testing.git", "zkat/windows-files", None), + ), + ( + "git+https://git.example.com/sdispater/project/my_repo.git", + GitUrl("https://git.example.com/sdispater/project/my_repo.git", None, None), + ), + ( + "git+ssh://git@git.example.com:sdispater/project/my_repo.git", + GitUrl("git@git.example.com:sdispater/project/my_repo.git", None, None), + ), + ( + "git+https://github.com/demo/pyproject-demo-subdirectory.git#subdirectory=project", + GitUrl( + "https://github.com/demo/pyproject-demo-subdirectory.git", + None, + "project", + ), + ), + ( + "git+https://github.com/demo/pyproject-demo-subdirectory.git@commit#subdirectory=project", + GitUrl( + "https://github.com/demo/pyproject-demo-subdirectory.git", + "commit", + "project", + ), + ), + ( + "git+https://github.com/demo/pyproject-demo-subdirectory.git#commit&subdirectory=project", + GitUrl( + "https://github.com/demo/pyproject-demo-subdirectory.git", + "commit", + "project", + ), + ), + ( + "git+https://github.com/demo/pyproject-demo-subdirectory.git#commit#subdirectory=project", + GitUrl( + "https://github.com/demo/pyproject-demo-subdirectory.git", + "commit", + "project", + ), + ), + ( + "git+https://github.com/demo/pyproject-demo-subdirectory.git@commit&subdirectory=project", + GitUrl( + "https://github.com/demo/pyproject-demo-subdirectory.git", + "commit", + "project", + ), + ), + ( + "git+https://github.com/demo/pyproject-demo-subdirectory.git@subdirectory#subdirectory=subdirectory", + GitUrl( + "https://github.com/demo/pyproject-demo-subdirectory.git", + "subdirectory", + "subdirectory", + ), + ), + ], +) +def test_normalize_url(url: str, normalized: GitUrl) -> None: + assert normalized == Git.normalize_url(url) + + +@pytest.mark.parametrize( + "url, parsed", + [ + ( + "git+ssh://user@hostname:project.git#commit", + ParsedUrl( + "ssh", "hostname", ":project.git", "user", None, "project", "commit" + ), + ), + ( + "git+http://user@hostname/project/blah.git@commit", + ParsedUrl( + "http", "hostname", "/project/blah.git", "user", None, "blah", "commit" + ), + ), + ( + "git+https://user@hostname/project/blah.git", + ParsedUrl( + "https", "hostname", "/project/blah.git", "user", None, "blah", None + ), + ), + ( + "git+https://user@hostname/project~_-.foo/blah~_-.bar.git", + ParsedUrl( + "https", + "hostname", + "/project~_-.foo/blah~_-.bar.git", + "user", + None, + "blah~_-.bar", + None, + ), + ), + ( + "git+https://user@hostname:project/blah.git", + ParsedUrl( + "https", "hostname", ":project/blah.git", "user", None, "blah", None + ), + ), + ( + "git+ssh://git@github.com:sdispater/poetry.git#v1.0.27", + ParsedUrl( + "ssh", + "github.com", + ":sdispater/poetry.git", + "git", + None, + "poetry", + "v1.0.27", + ), + ), + ( + "git+ssh://git@github.com:sdispater/poetry.git#egg=name", + ParsedUrl( + "ssh", + "github.com", + ":sdispater/poetry.git", + "git", + None, + "poetry", + None, + ), + ), + ( + "git+ssh://git@github.com:/sdispater/poetry.git", + ParsedUrl( + "ssh", + "github.com", + 
":/sdispater/poetry.git", + "git", + None, + "poetry", + None, + ), + ), + ( + "git+ssh://git@github.com:org/repo", + ParsedUrl("ssh", "github.com", ":org/repo", "git", None, "repo", None), + ), + ( + "git+ssh://git@github.com/org/repo", + ParsedUrl("ssh", "github.com", "/org/repo", "git", None, "repo", None), + ), + ( + "git+ssh://foo:22/some/path", + ParsedUrl("ssh", "foo", "/some/path", None, "22", "path", None), + ), + ( + "git@github.com:org/repo", + ParsedUrl(None, "github.com", ":org/repo", "git", None, "repo", None), + ), + ( + "git+https://github.com/sdispater/pendulum", + ParsedUrl( + "https", + "github.com", + "/sdispater/pendulum", + None, + None, + "pendulum", + None, + ), + ), + ( + "git+https://github.com/sdispater/pendulum#7a018f2d075b03a73409e8356f9b29c9ad4ea2c5", + ParsedUrl( + "https", + "github.com", + "/sdispater/pendulum", + None, + None, + "pendulum", + "7a018f2d075b03a73409e8356f9b29c9ad4ea2c5", + ), + ), + ( + "git+ssh://git@git.example.com:b/b.git#v1.0.0", + ParsedUrl("ssh", "git.example.com", ":b/b.git", "git", None, "b", "v1.0.0"), + ), + ( + "git+ssh://git@github.com:sdispater/pendulum.git#foo/bar", + ParsedUrl( + "ssh", + "github.com", + ":sdispater/pendulum.git", + "git", + None, + "pendulum", + "foo/bar", + ), + ), + ( + "git+file:///foo/bar.git", + ParsedUrl("file", None, "/foo/bar.git", None, None, "bar", None), + ), + ( + "git+file://C:\\Users\\hello\\testing.git#zkat/windows-files", + ParsedUrl( + "file", + "C", + ":\\Users\\hello\\testing.git", + None, + None, + "testing", + "zkat/windows-files", + ), + ), + ( + "git+https://git.example.com/sdispater/project/my_repo.git", + ParsedUrl( + "https", + "git.example.com", + "/sdispater/project/my_repo.git", + None, + None, + "my_repo", + None, + ), + ), + ( + "git+ssh://git@git.example.com:sdispater/project/my_repo.git", + ParsedUrl( + "ssh", + "git.example.com", + ":sdispater/project/my_repo.git", + "git", + None, + "my_repo", + None, + ), + ), + ( + "git+ssh://git@git.example.com:sdispater/project/my_repo.git#subdirectory=project-dir", + ParsedUrl( + "ssh", + "git.example.com", + ":sdispater/project/my_repo.git", + "git", + None, + "my_repo", + None, + "project-dir", + ), + ), + ( + "git+ssh://git@git.example.com:sdispater/project/my_repo.git#commit&subdirectory=project-dir", + ParsedUrl( + "ssh", + "git.example.com", + ":sdispater/project/my_repo.git", + "git", + None, + "my_repo", + "commit", + "project-dir", + ), + ), + ( + "git+ssh://git@git.example.com:sdispater/project/my_repo.git@commit#subdirectory=project-dir", + ParsedUrl( + "ssh", + "git.example.com", + ":sdispater/project/my_repo.git", + "git", + None, + "my_repo", + "commit", + "project-dir", + ), + ), + ( + "git+ssh://git@git.example.com:sdispater/project/my_repo.git@commit&subdirectory=project_dir", + ParsedUrl( + "ssh", + "git.example.com", + ":sdispater/project/my_repo.git", + "git", + None, + "my_repo", + "commit", + "project_dir", + ), + ), + ( + "git+ssh://git@git.example.com:sdispater/project/my_repo.git@commit#egg=package&subdirectory=project_dir", + ParsedUrl( + "ssh", + "git.example.com", + ":sdispater/project/my_repo.git", + "git", + None, + "my_repo", + "commit", + "project_dir", + ), + ), + ], +) +def test_parse_url(url: str, parsed: ParsedUrl) -> None: + result = ParsedUrl.parse(url) + assert parsed.name == result.name + assert parsed.pathname == result.pathname + assert parsed.port == result.port + assert parsed.protocol == result.protocol + assert parsed.resource == result.resource + assert parsed.rev == result.rev + assert 
parsed.url == result.url + assert parsed.user == result.user + + +def test_parse_url_should_fail() -> None: + url = "https://" + "@" * 64 + "!" + + with pytest.raises(ValueError): + ParsedUrl.parse(url) + + +def test_git_clone_raises_error_on_invalid_repository() -> None: + with pytest.raises(GitError): + Git().clone("-u./payload", Path("foo")) + + +def test_git_checkout_raises_error_on_invalid_repository() -> None: + with pytest.raises(GitError): + Git().checkout("-u./payload") + + +def test_git_rev_parse_raises_error_on_invalid_repository() -> None: + with pytest.raises(GitError): + Git().rev_parse("-u./payload") + + +@pytest.mark.skipif( + not WINDOWS, + reason=( + "Retrieving the complete path to git is only necessary on Windows, for security" + " reasons" + ), +) +def test_ensure_absolute_path_to_git(mocker: MockerFixture) -> None: + _reset_executable() + + def checkout_output(cmd: list[str], *args: Any, **kwargs: Any) -> str | bytes: + if Path(cmd[0]).name == "where.exe": + return "\n".join( + [ + str(Path.cwd().joinpath("git.exe")), + "C:\\Git\\cmd\\git.exe", + ] + ) + + return b"" + + mock = mocker.patch.object(subprocess, "check_output", side_effect=checkout_output) + + Git().run("config") + + assert mock.call_args_list[-1][0][0] == [ + "C:\\Git\\cmd\\git.exe", + "config", + ] + + +@pytest.mark.skipif( + not WINDOWS, + reason=( + "Retrieving the complete path to git is only necessary on Windows, for security" + " reasons" + ), +) +def test_ensure_existing_git_executable_is_found(mocker: MockerFixture) -> None: + mock = mocker.patch.object(subprocess, "check_output", return_value=b"") + + Git().run("config") + + cmd = Path(mock.call_args_list[-1][0][0][0]) + + assert cmd.is_absolute() + assert cmd.name == "git.exe" diff --git a/tests/version/__init__.py b/tests/version/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/version/pep440/__init__.py b/tests/version/pep440/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/tests/version/pep440/test_segments.py b/tests/version/pep440/test_segments.py new file mode 100644 index 0000000..6df0799 --- /dev/null +++ b/tests/version/pep440/test_segments.py @@ -0,0 +1,110 @@ +from __future__ import annotations + +import pytest + +from poetry.core.version.pep440 import Release +from poetry.core.version.pep440 import ReleaseTag +from poetry.core.version.pep440.segments import RELEASE_PHASE_NORMALIZATIONS + + +def test_release_post_init_minor_and_patch() -> None: + release = Release(1, extra=(0,)) + assert release.minor == 0 + assert release.patch == 0 + + +@pytest.mark.parametrize( + "parts,result", + [ + ((1,), Release(1)), + ((1, 2), Release(1, 2)), + ((1, 2, 3), Release(1, 2, 3)), + ((1, 2, 3, 4), Release(1, 2, 3, (4,))), + ((1, 2, 3, 4, 5, 6), Release(1, 2, 3, (4, 5, 6))), + ], +) +def test_release_from_parts(parts: tuple[int, ...], result: Release) -> None: + assert Release.from_parts(*parts) == result + + +@pytest.mark.parametrize("precision", list(range(1, 6))) +def test_release_precision(precision: int) -> None: + """ + Semantically identical releases might have a different precision, e.g. 1 vs. 
1.0 + """ + assert Release.from_parts(1, *[0] * (precision - 1)).precision == precision + + +@pytest.mark.parametrize("precision", list(range(1, 6))) +def test_release_text(precision: int) -> None: + increments = list(range(1, precision + 1)) + zeros = [1] + [0] * (precision - 1) + + assert Release.from_parts(*increments).text == ".".join(str(i) for i in increments) + assert Release.from_parts(*zeros).text == ".".join(str(i) for i in zeros) + + +@pytest.mark.parametrize("precision", list(range(1, 6))) +def test_release_next_major(precision: int) -> None: + release = Release.from_parts(1, *[0] * (precision - 1)) + expected = Release.from_parts(2, *[0] * (precision - 1)) + assert release.next_major() == expected + + +@pytest.mark.parametrize("precision", list(range(1, 6))) +def test_release_next_minor(precision: int) -> None: + release = Release.from_parts(1, *[0] * (precision - 1)) + expected = Release.from_parts(1, 1, *[0] * (precision - 2)) + assert release.next_minor() == expected + + +@pytest.mark.parametrize("precision", list(range(1, 6))) +def test_release_next_patch(precision: int) -> None: + release = Release.from_parts(1, *[0] * (precision - 1)) + expected = Release.from_parts(1, 0, 1, *[0] * (precision - 3)) + assert release.next_patch() == expected + + +@pytest.mark.parametrize( + "parts,result", + [ + (("a",), ReleaseTag("alpha", 0)), + (("a", 1), ReleaseTag("alpha", 1)), + (("b",), ReleaseTag("beta", 0)), + (("b", 1), ReleaseTag("beta", 1)), + (("pre",), ReleaseTag("preview", 0)), + (("pre", 1), ReleaseTag("preview", 1)), + (("c",), ReleaseTag("rc", 0)), + (("c", 1), ReleaseTag("rc", 1)), + (("r",), ReleaseTag("rev", 0)), + (("r", 1), ReleaseTag("rev", 1)), + ], +) +def test_release_tag_normalisation( + parts: tuple[str] | tuple[str, int], result: ReleaseTag +) -> None: + tag = ReleaseTag(*parts) + assert tag == result + assert tag.to_string() == result.to_string() + + +@pytest.mark.parametrize( + "parts,result", + [ + (("a",), ReleaseTag("beta")), + (("b",), ReleaseTag("rc")), + (("post",), None), + (("rc",), None), + (("rev",), None), + (("dev",), None), + ], +) +def test_release_tag_next_phase(parts: tuple[str], result: ReleaseTag | None) -> None: + assert ReleaseTag(*parts).next_phase() == result + + +@pytest.mark.parametrize("phase", list({*RELEASE_PHASE_NORMALIZATIONS.keys()})) +def test_release_tag_next(phase: str) -> None: + tag = ReleaseTag(phase=phase).next() + assert tag.phase == RELEASE_PHASE_NORMALIZATIONS[phase] + assert tag.number == 1 diff --git a/tests/version/pep440/test_version.py b/tests/version/pep440/test_version.py new file mode 100644 index 0000000..49ffe94 --- /dev/null +++ b/tests/version/pep440/test_version.py @@ -0,0 +1,343 @@ +from __future__ import annotations + +import pytest + +from poetry.core.version.exceptions import InvalidVersion +from poetry.core.version.pep440 import PEP440Version +from poetry.core.version.pep440 import Release +from poetry.core.version.pep440 import ReleaseTag + + +@pytest.mark.parametrize( + "text,result", + [ + ("1", PEP440Version(release=Release.from_parts(1))), + ("1.2.3", PEP440Version(release=Release.from_parts(1, 2, 3))), + ( + "1.2.3-1", + PEP440Version( + release=Release.from_parts(1, 2, 3), post=ReleaseTag("post", 1) + ), + ), + ( + "1.2.3.dev1", + PEP440Version( + release=Release.from_parts(1, 2, 3), dev=ReleaseTag("dev", 1) + ), + ), + ( + "1.2.3-1.dev1", + PEP440Version( + release=Release.from_parts(1, 2, 3), + post=ReleaseTag("post", 1), + dev=ReleaseTag("dev", 1), + ), + ), + ( + "1.2.3+local", + 
PEP440Version(release=Release.from_parts(1, 2, 3), local="local"), + ), + ( + "1.2.3+local.1", + PEP440Version(release=Release.from_parts(1, 2, 3), local=("local", 1)), + ), + ( + "1.2.3+local1", + PEP440Version(release=Release.from_parts(1, 2, 3), local="local1"), + ), + ("1.2.3+1", PEP440Version(release=Release.from_parts(1, 2, 3), local=1)), + ( + "1.2.3a1", + PEP440Version( + release=Release.from_parts(1, 2, 3), pre=ReleaseTag("alpha", 1) + ), + ), + ( + "1.2.3.a1", + PEP440Version( + release=Release.from_parts(1, 2, 3), pre=ReleaseTag("alpha", 1) + ), + ), + ( + "1.2.3alpha1", + PEP440Version( + release=Release.from_parts(1, 2, 3), pre=ReleaseTag("alpha", 1) + ), + ), + ( + "1.2.3b1", + PEP440Version( + release=Release.from_parts(1, 2, 3), pre=ReleaseTag("beta", 1) + ), + ), + ( + "1.2.3.b1", + PEP440Version( + release=Release.from_parts(1, 2, 3), pre=ReleaseTag("beta", 1) + ), + ), + ( + "1.2.3beta1", + PEP440Version( + release=Release.from_parts(1, 2, 3), pre=ReleaseTag("beta", 1) + ), + ), + ( + "1.2.3rc1", + PEP440Version(release=Release.from_parts(1, 2, 3), pre=ReleaseTag("rc", 1)), + ), + ( + "1.2.3.rc1", + PEP440Version(release=Release.from_parts(1, 2, 3), pre=ReleaseTag("rc", 1)), + ), + ( + "2.2.0dev0+build.05669607", + PEP440Version( + release=Release.from_parts(2, 2, 0), + dev=ReleaseTag("dev", 0), + local=("build", "05669607"), + ), + ), + ], +) +def test_pep440_parse_text(text: str, result: PEP440Version) -> None: + assert PEP440Version.parse(text) == result + + +@pytest.mark.parametrize( + "text", ["1.2.3.dev1-1", "example-1", "1.2.3-random1", "1.2.3-1-1"] +) +def test_pep440_parse_text_invalid_versions(text: str) -> None: + with pytest.raises(InvalidVersion): + PEP440Version.parse(text) + + +@pytest.mark.parametrize( + "version, expected", + [ + ("1", "2"), + ("2!1", "2!2"), + ("1+local", "2"), + ("1.2", "2.0"), + ("1.2.3", "2.0.0"), + ("1.2.3.4", "2.0.0.0"), + ("1.dev0", "1"), + ("1.2.dev0", "2.0"), + ("1.post1", "2"), + ("1.2.post1", "2.0"), + ("1.post1.dev0", "2"), + ("1.2.post1.dev0", "2.0"), + ("1.a1", "1"), + ("1.2a1", "2.0"), + ("1.a1.post2", "1"), + ("1.2a1.post2", "2.0"), + ("1.a1.post2.dev0", "1"), + ("1.2a1.post2.dev0", "2.0"), + ], +) +def test_next_major(version: str, expected: str) -> None: + v = PEP440Version.parse(version) + assert v.next_major().text == expected + + +@pytest.mark.parametrize( + "version, expected", + [ + ("1", "1.1"), + ("1.2", "1.3"), + ("2!1.2", "2!1.3"), + ("1.2+local", "1.3"), + ("1.2.3", "1.3.0"), + ("1.2.3.4", "1.3.0.0"), + ("1.dev0", "1"), + ("1.2dev0", "1.2"), + ("1.2.3dev0", "1.3.0"), + ("1.post1", "1.1"), + ("1.2.post1", "1.3"), + ("1.2.3.post1", "1.3.0"), + ("1.post1.dev0", "1.1"), + ("1.2.post1.dev0", "1.3"), + ("1.a1", "1"), + ("1.2a1", "1.2"), + ("1.2.3a1", "1.3.0"), + ("1.a1.post2", "1"), + ("1.2a1.post2", "1.2"), + ("1.2.3a1.post2", "1.3.0"), + ("1.a1.post2.dev0", "1"), + ("1.2a1.post2.dev0", "1.2"), + ("1.2.3a1.post2.dev0", "1.3.0"), + ], +) +def test_next_minor(version: str, expected: str) -> None: + v = PEP440Version.parse(version) + assert v.next_minor().text == expected + + +@pytest.mark.parametrize( + "version, expected", + [ + ("1", "1.0.1"), + ("1.2", "1.2.1"), + ("1.2.3", "1.2.4"), + ("2!1.2.3", "2!1.2.4"), + ("1.2.3+local", "1.2.4"), + ("1.2.3.4", "1.2.4.0"), + ("1.dev0", "1"), + ("1.2dev0", "1.2"), + ("1.2.3dev0", "1.2.3"), + ("1.2.3.4dev0", "1.2.4.0"), + ("1.post1", "1.0.1"), + ("1.2.post1", "1.2.1"), + ("1.2.3.post1", "1.2.4"), + ("1.post1.dev0", "1.0.1"), + ("1.2.post1.dev0", "1.2.1"), + 
("1.2.3.post1.dev0", "1.2.4"), + ("1.a1", "1"), + ("1.2a1", "1.2"), + ("1.2.3a1", "1.2.3"), + ("1.2.3.4a1", "1.2.4.0"), + ("1.a1.post2", "1"), + ("1.2a1.post2", "1.2"), + ("1.2.3a1.post2", "1.2.3"), + ("1.2.3.4a1.post2", "1.2.4.0"), + ("1.a1.post2.dev0", "1"), + ("1.2a1.post2.dev0", "1.2"), + ("1.2.3a1.post2.dev0", "1.2.3"), + ("1.2.3.4a1.post2.dev0", "1.2.4.0"), + ], +) +def test_next_patch(version: str, expected: str) -> None: + v = PEP440Version.parse(version) + assert v.next_patch().text == expected + + +@pytest.mark.parametrize( + "version, expected", + [ + ("1.2a1", "1.2a2"), + ("2!1.2a1", "2!1.2a2"), + ("1.2dev0", "1.2a0"), + ("1.2a1.dev0", "1.2a1"), + ("1.2a1.post1.dev0", "1.2a2"), + ], +) +def test_next_prerelease(version: str, expected: str) -> None: + v = PEP440Version.parse(version) + assert v.next_prerelease().text == expected + + +@pytest.mark.parametrize( + "version, expected", + [ + ("1", True), + ("1.2", True), + ("1.2.3", True), + ("2!1.2.3", True), + ("1.2.3+local", True), + ("1.2.3.4", True), + ("1.dev0", False), + ("1.2dev0", False), + ("1.2.3dev0", False), + ("1.2.3.4dev0", False), + ("1.post1", True), + ("1.2.post1", True), + ("1.2.3.post1", True), + ("1.post1.dev0", False), + ("1.2.post1.dev0", False), + ("1.2.3.post1.dev0", False), + ("1.a1", False), + ("1.2a1", False), + ("1.2.3a1", False), + ("1.2.3.4a1", False), + ("1.a1.post2", False), + ("1.2a1.post2", False), + ("1.2.3a1.post2", False), + ("1.2.3.4a1.post2", False), + ("1.a1.post2.dev0", False), + ("1.2a1.post2.dev0", False), + ("1.2.3a1.post2.dev0", False), + ("1.2.3.4a1.post2.dev0", False), + ], +) +def test_is_stable(version: str, expected: bool) -> None: + subject = PEP440Version.parse(version) + + assert subject.is_stable() == expected + assert subject.is_unstable() == (not expected) + + +@pytest.mark.parametrize( + "version, expected", + [ + ("0", True), + ("0.2", True), + ("0.2.3", True), + ("2!0.2.3", True), + ("0.2.3+local", True), + ("0.2.3.4", True), + ("0.dev0", False), + ("0.2dev0", False), + ("0.2.3dev0", False), + ("0.2.3.4dev0", False), + ("0.post1", True), + ("0.2.post1", True), + ("0.2.3.post1", True), + ("0.post1.dev0", False), + ("0.2.post1.dev0", False), + ("0.2.3.post1.dev0", False), + ("0.a1", False), + ("0.2a1", False), + ("0.2.3a1", False), + ("0.2.3.4a1", False), + ("0.a1.post2", False), + ("0.2a1.post2", False), + ("0.2.3a1.post2", False), + ("0.2.3.4a1.post2", False), + ("0.a1.post2.dev0", False), + ("0.2a1.post2.dev0", False), + ("0.2.3a1.post2.dev0", False), + ("0.2.3.4a1.post2.dev0", False), + ], +) +def test_is_stable_all_major_0_versions_are_treated_as_normal_versions( + version: str, expected: bool +) -> None: + subject = PEP440Version.parse(version) + + assert subject.is_stable() == expected + assert subject.is_unstable() == (not expected) + + +@pytest.mark.parametrize( + "version, expected", + [ + ("1", "1.post0"), + ("1.post1", "1.post2"), + ("9!1.2.3.4", "9!1.2.3.4.post0"), + ("9!1.2.3.4.post2", "9!1.2.3.4.post3"), + ("1.dev0", "1.post0"), + ("1.post1.dev0", "1.post1"), + ("1a1", "1a1.post0"), + ("1a1.dev0", "1a1.post0"), + ("1a1.post2", "1a1.post3"), + ("1a1.post2.dev0", "1a1.post2"), + ], +) +def test_next_postrelease(version: str, expected: str) -> None: + v = PEP440Version.parse(version) + assert v.next_postrelease().text == expected + + +def test_next_devrelease() -> None: + v = PEP440Version.parse("9!1.2.3a1.post2.dev3") + assert v.next_devrelease().text == "9!1.2.3a1.post2.dev4" + + +def test_first_prerelease() -> None: + v = 
PEP440Version.parse("9!1.2.3a1.post2.dev3") + assert v.first_prerelease().text == "9!1.2.3a0" + + +def test_first_devrelease() -> None: + v = PEP440Version.parse("9!1.2.3a1.post2.dev3") + assert v.first_devrelease().text == "9!1.2.3a1.post2.dev0" diff --git a/tests/version/test_markers.py b/tests/version/test_markers.py new file mode 100644 index 0000000..385afe8 --- /dev/null +++ b/tests/version/test_markers.py @@ -0,0 +1,1468 @@ +from __future__ import annotations + +import os + +from typing import TYPE_CHECKING + +import pytest + +from poetry.core.version.markers import AnyMarker +from poetry.core.version.markers import EmptyMarker +from poetry.core.version.markers import MarkerUnion +from poetry.core.version.markers import MultiMarker +from poetry.core.version.markers import SingleMarker +from poetry.core.version.markers import dnf +from poetry.core.version.markers import parse_marker + + +if TYPE_CHECKING: + from poetry.core.version.markers import BaseMarker + + +def test_single_marker() -> None: + m = parse_marker('sys_platform == "darwin"') + + assert isinstance(m, SingleMarker) + assert m.name == "sys_platform" + assert str(m.constraint) == "darwin" + + m = parse_marker('python_version in "2.7, 3.0, 3.1"') + + assert isinstance(m, SingleMarker) + assert m.name == "python_version" + assert str(m.constraint) == ">=2.7.0,<2.8.0 || >=3.0.0,<3.2.0" + + m = parse_marker('"2.7" in python_version') + + assert isinstance(m, SingleMarker) + assert m.name == "python_version" + assert str(m.constraint) == ">=2.7.0,<2.8.0" + + m = parse_marker('python_version not in "2.7, 3.0, 3.1"') + + assert isinstance(m, SingleMarker) + assert m.name == "python_version" + assert str(m.constraint) == "<2.7.0 || >=2.8.0,<3.0.0 || >=3.2.0" + + m = parse_marker( + "platform_machine in 'x86_64 X86_64 aarch64 AARCH64 ppc64le PPC64LE amd64 AMD64" + " win32 WIN32'" + ) + + assert isinstance(m, SingleMarker) + assert m.name == "platform_machine" + assert ( + str(m.constraint) + == "x86_64 || X86_64 || aarch64 || AARCH64 || ppc64le || PPC64LE || amd64 ||" + " AMD64 || win32 || WIN32" + ) + + m = parse_marker( + "platform_machine not in 'x86_64 X86_64 aarch64 AARCH64 ppc64le PPC64LE amd64" + " AMD64 win32 WIN32'" + ) + + assert isinstance(m, SingleMarker) + assert m.name == "platform_machine" + assert ( + str(m.constraint) + == "!=x86_64, !=X86_64, !=aarch64, !=AARCH64, !=ppc64le, !=PPC64LE, !=amd64," + " !=AMD64, !=win32, !=WIN32" + ) + + +def test_single_marker_normalisation() -> None: + m1 = SingleMarker("python_version", ">=3.6") + m2 = SingleMarker("python_version", ">= 3.6") + assert m1 == m2 + assert hash(m1) == hash(m2) + + +def test_single_marker_intersect() -> None: + m = parse_marker('sys_platform == "darwin"') + + intersection = m.intersect(parse_marker('implementation_name == "cpython"')) + assert ( + str(intersection) + == 'sys_platform == "darwin" and implementation_name == "cpython"' + ) + + m = parse_marker('python_version >= "3.4"') + + intersection = m.intersect(parse_marker('python_version < "3.6"')) + assert str(intersection) == 'python_version >= "3.4" and python_version < "3.6"' + + +def test_single_marker_intersect_compacts_constraints() -> None: + m = parse_marker('python_version < "3.6"') + + intersection = m.intersect(parse_marker('python_version < "3.4"')) + assert str(intersection) == 'python_version < "3.4"' + + +def test_single_marker_intersect_with_multi() -> None: + m = parse_marker('sys_platform == "darwin"') + + intersection = m.intersect( + parse_marker('implementation_name == 
"cpython" and python_version >= "3.6"') + ) + assert ( + str(intersection) + == 'implementation_name == "cpython" and python_version >= "3.6" and' + ' sys_platform == "darwin"' + ) + + +def test_single_marker_intersect_with_multi_with_duplicate() -> None: + m = parse_marker('python_version < "4.0"') + + intersection = m.intersect( + parse_marker('sys_platform == "darwin" and python_version < "4.0"') + ) + assert str(intersection) == 'sys_platform == "darwin" and python_version < "4.0"' + + +def test_single_marker_intersect_with_multi_compacts_constraint() -> None: + m = parse_marker('python_version < "3.6"') + + intersection = m.intersect( + parse_marker('implementation_name == "cpython" and python_version < "3.4"') + ) + assert ( + str(intersection) + == 'implementation_name == "cpython" and python_version < "3.4"' + ) + + +def test_single_marker_intersect_with_union_leads_to_single_marker() -> None: + m = parse_marker('python_version >= "3.6"') + + intersection = m.intersect( + parse_marker('python_version < "3.6" or python_version >= "3.7"') + ) + assert str(intersection) == 'python_version >= "3.7"' + + +def test_single_marker_intersect_with_union_leads_to_empty() -> None: + m = parse_marker('python_version == "3.7"') + + intersection = m.intersect( + parse_marker('python_version < "3.7" or python_version >= "3.8"') + ) + assert intersection.is_empty() + + +def test_single_marker_not_in_python_intersection() -> None: + m = parse_marker('python_version not in "2.7, 3.0, 3.1"') + + intersection = m.intersect( + parse_marker('python_version not in "2.7, 3.0, 3.1, 3.2"') + ) + assert str(intersection) == 'python_version not in "2.7, 3.0, 3.1, 3.2"' + + +def test_single_marker_union() -> None: + m = parse_marker('sys_platform == "darwin"') + + union = m.union(parse_marker('implementation_name == "cpython"')) + assert str(union) == 'sys_platform == "darwin" or implementation_name == "cpython"' + + +def test_single_marker_union_is_any() -> None: + m = parse_marker('python_version >= "3.4"') + + union = m.union(parse_marker('python_version < "3.6"')) + assert union.is_any() + + +@pytest.mark.parametrize( + ("marker1", "marker2", "expected"), + [ + ( + 'python_version < "3.6"', + 'python_version < "3.4"', + 'python_version < "3.6"', + ), + ( + 'sys_platform == "linux"', + 'sys_platform != "win32"', + 'sys_platform != "win32"', + ), + ( + 'python_version == "3.6"', + 'python_version > "3.6"', + 'python_version >= "3.6"', + ), + ( + 'python_version == "3.6"', + 'python_version < "3.6"', + 'python_version <= "3.6"', + ), + ( + 'python_version < "3.6"', + 'python_version > "3.6"', + 'python_version != "3.6"', + ), + ], +) +def test_single_marker_union_is_single_marker( + marker1: str, marker2: str, expected: str +) -> None: + m = parse_marker(marker1) + + union = m.union(parse_marker(marker2)) + assert str(union) == expected + + +def test_single_marker_union_with_multi() -> None: + m = parse_marker('sys_platform == "darwin"') + + union = m.union( + parse_marker('implementation_name == "cpython" and python_version >= "3.6"') + ) + assert ( + str(union) + == 'implementation_name == "cpython" and python_version >= "3.6" or' + ' sys_platform == "darwin"' + ) + + +def test_single_marker_union_with_multi_duplicate() -> None: + m = parse_marker('sys_platform == "darwin" and python_version >= "3.6"') + + union = m.union( + parse_marker('sys_platform == "darwin" and python_version >= "3.6"') + ) + assert str(union) == 'sys_platform == "darwin" and python_version >= "3.6"' + + +@pytest.mark.parametrize( + 
("single_marker", "multi_marker", "expected"), + [ + ( + 'python_version >= "3.6"', + 'python_version >= "3.7" and sys_platform == "win32"', + 'python_version >= "3.6"', + ), + ( + 'sys_platform == "linux"', + 'sys_platform != "linux" and sys_platform != "win32"', + 'sys_platform != "win32"', + ), + ], +) +def test_single_marker_union_with_multi_is_single_marker( + single_marker: str, multi_marker: str, expected: str +) -> None: + m = parse_marker(single_marker) + union = m.union(parse_marker(multi_marker)) + assert str(union) == expected + + +def test_single_marker_union_with_multi_cannot_be_simplified() -> None: + m = parse_marker('python_version >= "3.7"') + union = m.union(parse_marker('python_version >= "3.6" and sys_platform == "win32"')) + assert ( + str(union) + == 'python_version >= "3.6" and sys_platform == "win32" or python_version >=' + ' "3.7"' + ) + + +def test_single_marker_union_with_multi_is_union_of_single_markers() -> None: + m = parse_marker('python_version >= "3.6"') + union = m.union(parse_marker('python_version < "3.6" and sys_platform == "win32"')) + assert str(union) == 'sys_platform == "win32" or python_version >= "3.6"' + + +def test_single_marker_union_with_multi_union_is_union_of_single_markers() -> None: + m = parse_marker('python_version >= "3.6"') + union = m.union( + parse_marker( + 'python_version < "3.6" and sys_platform == "win32" or python_version <' + ' "3.6" and sys_platform == "linux"' + ) + ) + assert ( + str(union) + == 'sys_platform == "win32" or sys_platform == "linux" or python_version >=' + ' "3.6"' + ) + + +def test_single_marker_union_with_union() -> None: + m = parse_marker('sys_platform == "darwin"') + + union = m.union( + parse_marker('implementation_name == "cpython" or python_version >= "3.6"') + ) + assert ( + str(union) + == 'implementation_name == "cpython" or python_version >= "3.6" or sys_platform' + ' == "darwin"' + ) + + +def test_single_marker_not_in_python_union() -> None: + m = parse_marker('python_version not in "2.7, 3.0, 3.1"') + + union = m.union(parse_marker('python_version not in "2.7, 3.0, 3.1, 3.2"')) + assert str(union) == 'python_version not in "2.7, 3.0, 3.1"' + + +def test_single_marker_union_with_union_duplicate() -> None: + m = parse_marker('sys_platform == "darwin"') + + union = m.union(parse_marker('sys_platform == "darwin" or python_version >= "3.6"')) + assert str(union) == 'sys_platform == "darwin" or python_version >= "3.6"' + + m = parse_marker('python_version >= "3.7"') + + union = m.union(parse_marker('sys_platform == "darwin" or python_version >= "3.6"')) + assert str(union) == 'sys_platform == "darwin" or python_version >= "3.6"' + + m = parse_marker('python_version <= "3.6"') + + union = m.union(parse_marker('sys_platform == "darwin" or python_version < "3.4"')) + assert str(union) == 'sys_platform == "darwin" or python_version <= "3.6"' + + +def test_single_marker_union_with_inverse() -> None: + m = parse_marker('sys_platform == "darwin"') + union = m.union(parse_marker('sys_platform != "darwin"')) + assert union.is_any() + + +def test_multi_marker() -> None: + m = parse_marker('sys_platform == "darwin" and implementation_name == "cpython"') + + assert isinstance(m, MultiMarker) + assert m.markers == [ + parse_marker('sys_platform == "darwin"'), + parse_marker('implementation_name == "cpython"'), + ] + + +def test_multi_marker_is_empty_is_contradictory() -> None: + m = parse_marker( + 'sys_platform == "linux" and python_version >= "3.5" and python_version < "2.8"' + ) + + assert m.is_empty() + + m = 
parse_marker('sys_platform == "linux" and sys_platform == "win32"') + + assert m.is_empty() + + +def test_multi_complex_multi_marker_is_empty() -> None: + m1 = parse_marker( + 'python_full_version >= "3.0.0" and python_full_version < "3.4.0"' + ) + m2 = parse_marker( + 'python_version >= "3.6" and python_full_version < "3.0.0" and python_version <' + ' "3.7"' + ) + m3 = parse_marker( + 'python_version >= "3.6" and python_version < "3.7" and python_full_version >=' + ' "3.5.0"' + ) + + m = m1.intersect(m2.union(m3)) + + assert m.is_empty() + + +def test_multi_marker_intersect_multi() -> None: + m = parse_marker('sys_platform == "darwin" and implementation_name == "cpython"') + + intersection = m.intersect( + parse_marker('python_version >= "3.6" and os_name == "Windows"') + ) + assert ( + str(intersection) + == 'sys_platform == "darwin" and implementation_name == "cpython" ' + 'and python_version >= "3.6" and os_name == "Windows"' + ) + + +def test_multi_marker_intersect_multi_with_overlapping_constraints() -> None: + m = parse_marker('sys_platform == "darwin" and python_version < "3.6"') + + intersection = m.intersect( + parse_marker( + 'python_version <= "3.4" and os_name == "Windows" and sys_platform ==' + ' "darwin"' + ) + ) + assert ( + str(intersection) + == 'sys_platform == "darwin" and python_version <= "3.4" and os_name ==' + ' "Windows"' + ) + + +def test_multi_marker_intersect_with_union_drops_union() -> None: + m = parse_marker('python_version >= "3" and python_version < "4"') + m2 = parse_marker('python_version < "2" or python_version >= "3"') + assert str(m.intersect(m2)) == str(m) + assert str(m2.intersect(m)) == str(m) + + +def test_multi_marker_intersect_with_multi_union_leads_to_empty_in_one_step() -> None: + # empty marker in one step + # py == 2 and (py < 2 or py >= 3) -> empty + m = parse_marker('sys_platform == "darwin" and python_version == "2"') + m2 = parse_marker( + 'sys_platform == "darwin" and (python_version < "2" or python_version >= "3")' + ) + assert m.intersect(m2).is_empty() + assert m2.intersect(m).is_empty() + + +def test_multi_marker_intersect_with_multi_union_leads_to_empty_in_two_steps() -> None: + # empty marker in two steps + # py >= 2 and (py < 2 or py >= 3) -> py >= 3 + # py < 3 and py >= 3 -> empty + m = parse_marker('python_version >= "2" and python_version < "3"') + m2 = parse_marker( + 'sys_platform == "darwin" and (python_version < "2" or python_version >= "3")' + ) + assert m.intersect(m2).is_empty() + assert m2.intersect(m).is_empty() + + +def test_multi_marker_union_multi() -> None: + m = parse_marker('sys_platform == "darwin" and implementation_name == "cpython"') + + union = m.union(parse_marker('python_version >= "3.6" and os_name == "Windows"')) + assert ( + str(union) + == 'sys_platform == "darwin" and implementation_name == "cpython" ' + 'or python_version >= "3.6" and os_name == "Windows"' + ) + + +def test_multi_marker_union_multi_is_single_marker() -> None: + m = parse_marker('python_version >= "3" and sys_platform == "win32"') + m2 = parse_marker('sys_platform != "win32" and python_version >= "3"') + assert str(m.union(m2)) == 'python_version >= "3"' + assert str(m2.union(m)) == 'python_version >= "3"' + + +@pytest.mark.parametrize( + "marker1, marker2, expected", + [ + ( + 'python_version >= "3" and sys_platform == "win32"', + ( + 'python_version >= "3" and sys_platform != "win32" and sys_platform !=' + ' "linux"' + ), + 'python_version >= "3" and sys_platform != "linux"', + ), + ( + ( + 'python_version >= "3.8" and 
python_version < "4.0" and sys_platform ==' + ' "win32"' + ), + 'python_version >= "3.8" and python_version < "4.0"', + 'python_version >= "3.8" and python_version < "4.0"', + ), + ], +) +def test_multi_marker_union_multi_is_multi( + marker1: str, marker2: str, expected: str +) -> None: + m1 = parse_marker(marker1) + m2 = parse_marker(marker2) + assert str(m1.union(m2)) == expected + assert str(m2.union(m1)) == expected + + +@pytest.mark.parametrize( + "marker1, marker2, expected", + [ + # Ranges with same start + ( + 'python_version >= "3.6" and python_full_version < "3.6.2"', + 'python_version >= "3.6" and python_version < "3.7"', + 'python_version >= "3.6" and python_version < "3.7"', + ), + ( + 'python_version > "3.6" and python_full_version < "3.6.2"', + 'python_version > "3.6" and python_version < "3.7"', + 'python_version > "3.6" and python_version < "3.7"', + ), + # Ranges meet exactly + ( + 'python_version >= "3.6" and python_full_version < "3.6.2"', + 'python_full_version >= "3.6.2" and python_version < "3.7"', + 'python_version >= "3.6" and python_version < "3.7"', + ), + ( + 'python_version >= "3.6" and python_full_version <= "3.6.2"', + 'python_full_version > "3.6.2" and python_version < "3.7"', + 'python_version >= "3.6" and python_version < "3.7"', + ), + # Ranges overlap + ( + 'python_version >= "3.6" and python_full_version <= "3.6.8"', + 'python_full_version >= "3.6.2" and python_version < "3.7"', + 'python_version >= "3.6" and python_version < "3.7"', + ), + # Ranges with same end. Ideally the union would give the lower version first. + ( + 'python_version >= "3.6" and python_version < "3.7"', + 'python_full_version >= "3.6.2" and python_version < "3.7"', + 'python_version < "3.7" and python_version >= "3.6"', + ), + ( + 'python_version >= "3.6" and python_version <= "3.7"', + 'python_full_version >= "3.6.2" and python_version <= "3.7"', + 'python_version <= "3.7" and python_version >= "3.6"', + ), + # A range covers an exact marker. 
+ ( + 'python_version >= "3.6" and python_version <= "3.7"', + 'python_version == "3.6"', + 'python_version >= "3.6" and python_version <= "3.7"', + ), + ( + 'python_version >= "3.6" and python_version <= "3.7"', + 'python_version == "3.6" and implementation_name == "cpython"', + 'python_version >= "3.6" and python_version <= "3.7"', + ), + ( + 'python_version >= "3.6" and python_version <= "3.7"', + 'python_full_version == "3.6.2"', + 'python_version >= "3.6" and python_version <= "3.7"', + ), + ( + 'python_version >= "3.6" and python_version <= "3.7"', + 'python_full_version == "3.6.2" and implementation_name == "cpython"', + 'python_version >= "3.6" and python_version <= "3.7"', + ), + ( + 'python_version >= "3.6" and python_version <= "3.7"', + 'python_version == "3.7"', + 'python_version >= "3.6" and python_version <= "3.7"', + ), + ( + 'python_version >= "3.6" and python_version <= "3.7"', + 'python_version == "3.7" and implementation_name == "cpython"', + 'python_version >= "3.6" and python_version <= "3.7"', + ), + ], +) +def test_version_ranges_collapse_on_union( + marker1: str, marker2: str, expected: str +) -> None: + m1 = parse_marker(marker1) + m2 = parse_marker(marker2) + assert str(m1.union(m2)) == expected + assert str(m2.union(m1)) == expected + + +def test_multi_marker_union_with_union() -> None: + m = parse_marker('sys_platform == "darwin" and implementation_name == "cpython"') + + union = m.union(parse_marker('python_version >= "3.6" or os_name == "Windows"')) + assert ( + str(union) + == 'python_version >= "3.6" or os_name == "Windows"' + ' or sys_platform == "darwin" and implementation_name == "cpython"' + ) + + +def test_multi_marker_union_with_multi_union_is_single_marker() -> None: + m = parse_marker('sys_platform == "darwin" and python_version == "3"') + m2 = parse_marker( + 'sys_platform == "darwin" and python_version < "3" or sys_platform == "darwin"' + ' and python_version > "3"' + ) + assert str(m.union(m2)) == 'sys_platform == "darwin"' + assert str(m2.union(m)) == 'sys_platform == "darwin"' + + +def test_multi_marker_union_with_union_multi_is_single_marker() -> None: + m = parse_marker('sys_platform == "darwin" and python_version == "3"') + m2 = parse_marker( + 'sys_platform == "darwin" and (python_version < "3" or python_version > "3")' + ) + assert str(m.union(m2)) == 'sys_platform == "darwin"' + assert str(m2.union(m)) == 'sys_platform == "darwin"' + + +def test_marker_union() -> None: + m = parse_marker('sys_platform == "darwin" or implementation_name == "cpython"') + + assert isinstance(m, MarkerUnion) + assert m.markers == [ + parse_marker('sys_platform == "darwin"'), + parse_marker('implementation_name == "cpython"'), + ] + + +def test_marker_union_deduplicate() -> None: + m = parse_marker( + 'sys_platform == "darwin" or implementation_name == "cpython" or sys_platform' + ' == "darwin"' + ) + + assert str(m) == 'sys_platform == "darwin" or implementation_name == "cpython"' + + +def test_marker_union_intersect_single_marker() -> None: + m = parse_marker('sys_platform == "darwin" or python_version < "3.4"') + + intersection = m.intersect(parse_marker('implementation_name == "cpython"')) + assert ( + str(intersection) + == 'sys_platform == "darwin" and implementation_name == "cpython" ' + 'or python_version < "3.4" and implementation_name == "cpython"' + ) + + +def test_marker_union_intersect_single_with_overlapping_constraints() -> None: + m = parse_marker('sys_platform == "darwin" or python_version < "3.4"') + + intersection = 
m.intersect(parse_marker('python_version <= "3.6"'))
+    assert (
+        str(intersection)
+        == 'sys_platform == "darwin" and python_version <= "3.6" or python_version <'
+        ' "3.4"'
+    )
+
+    m = parse_marker('sys_platform == "darwin" or python_version < "3.4"')
+    intersection = m.intersect(parse_marker('sys_platform == "darwin"'))
+    assert str(intersection) == 'sys_platform == "darwin"'
+
+
+def test_marker_union_intersect_marker_union() -> None:
+    m = parse_marker('sys_platform == "darwin" or python_version < "3.4"')
+
+    intersection = m.intersect(
+        parse_marker('implementation_name == "cpython" or os_name == "Windows"')
+    )
+    assert (
+        str(intersection)
+        == 'sys_platform == "darwin" and implementation_name == "cpython" '
+        'or sys_platform == "darwin" and os_name == "Windows" or '
+        'python_version < "3.4" and implementation_name == "cpython" or '
+        'python_version < "3.4" and os_name == "Windows"'
+    )
+
+
+def test_marker_union_intersect_marker_union_drops_unnecessary_markers() -> None:
+    m = parse_marker(
+        'python_version >= "2.7" and python_version < "2.8" '
+        'or python_version >= "3.4" and python_version < "4.0"'
+    )
+    m2 = parse_marker(
+        'python_version >= "2.7" and python_version < "2.8" '
+        'or python_version >= "3.4" and python_version < "4.0"'
+    )
+
+    intersection = m.intersect(m2)
+    expected = (
+        'python_version >= "2.7" and python_version < "2.8" '
+        'or python_version >= "3.4" and python_version < "4.0"'
+    )
+    assert str(intersection) == expected
+
+
+def test_marker_union_intersect_multi_marker() -> None:
+    m1 = parse_marker('sys_platform == "darwin" or python_version < "3.4"')
+    m2 = parse_marker('implementation_name == "cpython" and os_name == "Windows"')
+
+    expected = (
+        'implementation_name == "cpython" and os_name == "Windows" and sys_platform'
+        ' == "darwin" or implementation_name == "cpython" and os_name == "Windows"'
+        ' and python_version < "3.4"'
+    )
+
+    intersection = m1.intersect(m2)
+    assert str(intersection) == expected
+
+    intersection = m2.intersect(m1)
+    assert str(intersection) == expected
+
+
+def test_marker_union_union_with_union() -> None:
+    m = parse_marker('sys_platform == "darwin" or python_version < "3.4"')
+
+    union = m.union(
+        parse_marker('implementation_name == "cpython" or os_name == "Windows"')
+    )
+    assert (
+        str(union)
+        == 'sys_platform == "darwin" or python_version < "3.4" '
+        'or implementation_name == "cpython" or os_name == "Windows"'
+    )
+
+
+def test_marker_union_union_duplicates() -> None:
+    m = parse_marker('sys_platform == "darwin" or python_version < "3.4"')
+
+    union = m.union(parse_marker('sys_platform == "darwin" or os_name == "Windows"'))
+    assert (
+        str(union)
+        == 'sys_platform == "darwin" or python_version < "3.4" or os_name == "Windows"'
+    )
+
+    m = parse_marker('sys_platform == "darwin" or python_version < "3.4"')
+
+    union = m.union(
+        parse_marker(
+            'sys_platform == "darwin" or os_name == "Windows" or python_version <='
+            ' "3.6"'
+        )
+    )
+    assert (
+        str(union)
+        == 'sys_platform == "darwin" or python_version <= "3.6" or os_name == "Windows"'
+    )
+
+
+def test_marker_union_all_any() -> None:
+    union = MarkerUnion(parse_marker(""), parse_marker(""))
+
+    assert union.is_any()
+
+
+def test_marker_union_not_all_any() -> None:
+    union = MarkerUnion(parse_marker(""), parse_marker(""), parse_marker("<empty>"))
+
+    assert union.is_any()
+
+
+def test_marker_union_all_empty() -> None:
+    union = MarkerUnion(parse_marker("<empty>"), parse_marker("<empty>"))
+
+    assert union.is_empty()
+
+
+def test_marker_union_not_all_empty() -> None:
+    union 
= MarkerUnion(
+        parse_marker("<empty>"), parse_marker("<empty>"), parse_marker("")
+    )
+
+    assert not union.is_empty()
+
+
+def test_marker_str_conversion_skips_empty_and_any() -> None:
+    union = MarkerUnion(
+        parse_marker("<empty>"),
+        parse_marker(
+            'sys_platform == "darwin" or python_version <= "3.6" or os_name =='
+            ' "Windows"'
+        ),
+        parse_marker(""),
+    )
+
+    assert (
+        str(union)
+        == 'sys_platform == "darwin" or python_version <= "3.6" or os_name == "Windows"'
+    )
+
+
+def test_intersect_compacts_constraints() -> None:
+    m = parse_marker('python_version < "4.0"')
+
+    intersection = m.intersect(parse_marker('python_version < "5.0"'))
+    assert str(intersection) == 'python_version < "4.0"'
+
+
+def test_multi_marker_removes_duplicates() -> None:
+    m = parse_marker('sys_platform == "win32" and sys_platform == "win32"')
+
+    assert str(m) == 'sys_platform == "win32"'
+
+    m = parse_marker(
+        'sys_platform == "darwin" and implementation_name == "cpython" '
+        'and sys_platform == "darwin" and implementation_name == "cpython"'
+    )
+
+    assert str(m) == 'sys_platform == "darwin" and implementation_name == "cpython"'
+
+
+@pytest.mark.parametrize(
+    ("marker_string", "environment", "expected"),
+    [
+        (f"os_name == '{os.name}'", None, True),
+        ("os_name == 'foo'", {"os_name": "foo"}, True),
+        ("os_name == 'foo'", {"os_name": "bar"}, False),
+        ("'2.7' in python_version", {"python_version": "2.7.5"}, True),
+        ("'2.7' not in python_version", {"python_version": "2.7"}, False),
+        (
+            "os_name == 'foo' and python_version ~= '2.7.0'",
+            {"os_name": "foo", "python_version": "2.7.6"},
+            True,
+        ),
+        (
+            "python_version ~= '2.7.0' and (os_name == 'foo' or os_name == 'bar')",
+            {"os_name": "foo", "python_version": "2.7.4"},
+            True,
+        ),
+        (
+            "python_version ~= '2.7.0' and (os_name == 'foo' or os_name == 'bar')",
+            {"os_name": "bar", "python_version": "2.7.4"},
+            True,
+        ),
+        (
+            "python_version ~= '2.7.0' and (os_name == 'foo' or os_name == 'bar')",
+            {"os_name": "other", "python_version": "2.7.4"},
+            False,
+        ),
+        ("extra == 'security'", {"extra": "quux"}, False),
+        ("extra == 'security'", {"extra": "security"}, True),
+        (f"os.name == '{os.name}'", None, True),
+        ("sys.platform == 'win32'", {"sys_platform": "linux2"}, False),
+        ("platform.version in 'Ubuntu'", {"platform_version": "#39"}, False),
+        ("platform.machine=='x86_64'", {"platform_machine": "x86_64"}, True),
+        (
+            "platform.python_implementation=='Jython'",
+            {"platform_python_implementation": "CPython"},
+            False,
+        ),
+        (
+            "python_version == '2.5' and platform.python_implementation!= 'Jython'",
+            {"python_version": "2.7"},
+            False,
+        ),
+        (
+            (
+                "platform_machine in 'x86_64 X86_64 aarch64 AARCH64 ppc64le PPC64LE"
+                " amd64 AMD64 win32 WIN32'"
+            ),
+            {"platform_machine": "foo"},
+            False,
+        ),
+        (
+            (
+                "platform_machine in 'x86_64 X86_64 aarch64 AARCH64 ppc64le PPC64LE"
+                " amd64 AMD64 win32 WIN32'"
+            ),
+            {"platform_machine": "x86_64"},
+            True,
+        ),
+        (
+            (
+                "platform_machine not in 'x86_64 X86_64 aarch64 AARCH64 ppc64le PPC64LE"
+                " amd64 AMD64 win32 WIN32'"
+            ),
+            {"platform_machine": "foo"},
+            True,
+        ),
+        (
+            (
+                "platform_machine not in 'x86_64 X86_64 aarch64 AARCH64 ppc64le PPC64LE"
+                " amd64 AMD64 win32 WIN32'"
+            ),
+            {"platform_machine": "x86_64"},
+            False,
+        ),
+    ],
+)
+def test_validate(
+    marker_string: str, environment: dict[str, str] | None, expected: bool
+) -> None:
+    m = parse_marker(marker_string)
+
+    assert m.validate(environment) is expected
+
+
+@pytest.mark.parametrize(
+    "marker, env",
+    [
+        (
+            'platform_release >= "9.0" and 
platform_release < "11.0"', + {"platform_release": "10.0"}, + ) + ], +) +def test_parse_version_like_markers(marker: str, env: dict[str, str]) -> None: + m = parse_marker(marker) + + assert m.validate(env) + + +@pytest.mark.parametrize( + "marker, expected", + [ + ('python_version >= "3.6"', 'python_version >= "3.6"'), + ('python_version >= "3.6" and extra == "foo"', 'python_version >= "3.6"'), + ( + 'python_version >= "3.6" and (extra == "foo" or extra == "bar")', + 'python_version >= "3.6"', + ), + ( + ( + 'python_version >= "3.6" and (extra == "foo" or extra == "bar") or' + ' implementation_name == "pypy"' + ), + 'python_version >= "3.6" or implementation_name == "pypy"', + ), + ( + ( + 'python_version >= "3.6" and extra == "foo" or implementation_name ==' + ' "pypy" and extra == "bar"' + ), + 'python_version >= "3.6" or implementation_name == "pypy"', + ), + ( + ( + 'python_version >= "3.6" or extra == "foo" and implementation_name ==' + ' "pypy" or extra == "bar"' + ), + 'python_version >= "3.6" or implementation_name == "pypy"', + ), + ('extra == "foo"', ""), + ('extra == "foo" or extra == "bar"', ""), + ], +) +def test_without_extras(marker: str, expected: str) -> None: + m = parse_marker(marker) + + assert str(m.without_extras()) == expected + + +@pytest.mark.parametrize( + "marker, excluded, expected", + [ + ('python_version >= "3.6"', "implementation_name", 'python_version >= "3.6"'), + ('python_version >= "3.6"', "python_version", "*"), + ( + 'python_version >= "3.6" and extra == "foo"', + "extra", + 'python_version >= "3.6"', + ), + ( + 'python_version >= "3.6" and (extra == "foo" or extra == "bar")', + "python_version", + 'extra == "foo" or extra == "bar"', + ), + ( + ( + 'python_version >= "3.6" and (extra == "foo" or extra == "bar") or' + ' implementation_name == "pypy"' + ), + "python_version", + 'extra == "foo" or extra == "bar" or implementation_name == "pypy"', + ), + ( + ( + 'python_version >= "3.6" and extra == "foo" or implementation_name ==' + ' "pypy" and extra == "bar"' + ), + "implementation_name", + 'python_version >= "3.6" and extra == "foo" or extra == "bar"', + ), + ( + ( + 'python_version >= "3.6" or extra == "foo" and implementation_name ==' + ' "pypy" or extra == "bar"' + ), + "implementation_name", + 'python_version >= "3.6" or extra == "foo" or extra == "bar"', + ), + ( + 'extra == "foo" and python_version >= "3.6" or python_version >= "3.6"', + "extra", + 'python_version >= "3.6"', + ), + ], +) +def test_exclude(marker: str, excluded: str, expected: str) -> None: + m = parse_marker(marker) + + if expected == "*": + assert m.exclude(excluded).is_any() + else: + assert str(m.exclude(excluded)) == expected + + +@pytest.mark.parametrize( + "marker, only, expected", + [ + ('python_version >= "3.6"', ["python_version"], 'python_version >= "3.6"'), + ('python_version >= "3.6"', ["sys_platform"], ""), + ( + 'python_version >= "3.6" and extra == "foo"', + ["python_version"], + 'python_version >= "3.6"', + ), + ( + 'python_version >= "3.6" and (extra == "foo" or extra == "bar")', + ["extra"], + 'extra == "foo" or extra == "bar"', + ), + ( + ( + 'python_version >= "3.6" and (extra == "foo" or extra == "bar") or' + ' implementation_name == "pypy"' + ), + ["implementation_name"], + 'implementation_name == "pypy"', + ), + ( + ( + 'python_version >= "3.6" and extra == "foo" or implementation_name ==' + ' "pypy" and extra == "bar"' + ), + ["implementation_name"], + 'implementation_name == "pypy"', + ), + ( + ( + 'python_version >= "3.6" or extra == "foo" and 
implementation_name ==' + ' "pypy" or extra == "bar"' + ), + ["implementation_name"], + 'implementation_name == "pypy"', + ), + ( + ( + 'python_version >= "3.6" or extra == "foo" and implementation_name ==' + ' "pypy" or extra == "bar"' + ), + ["implementation_name", "python_version"], + 'python_version >= "3.6" or implementation_name == "pypy"', + ), + ], +) +def test_only(marker: str, only: list[str], expected: str) -> None: + m = parse_marker(marker) + + assert str(m.only(*only)) == expected + + +def test_union_of_a_single_marker_is_the_single_marker() -> None: + union = MarkerUnion.of(SingleMarker("python_version", ">= 2.7")) + + assert SingleMarker("python_version", ">= 2.7") == union + + +def test_union_of_multi_with_a_containing_single() -> None: + single = parse_marker('python_version >= "2.7"') + multi = parse_marker('python_version >= "2.7" and extra == "foo"') + union = multi.union(single) + + assert union == single + + +@pytest.mark.parametrize( + "marker, inverse", + [ + ('implementation_name == "pypy"', 'implementation_name != "pypy"'), + ('implementation_name === "pypy"', 'implementation_name != "pypy"'), + ('implementation_name != "pypy"', 'implementation_name == "pypy"'), + ('python_version in "2.7, 3.0, 3.1"', 'python_version not in "2.7, 3.0, 3.1"'), + ('python_version not in "2.7, 3.0, 3.1"', 'python_version in "2.7, 3.0, 3.1"'), + ('python_version < "3.6"', 'python_version >= "3.6"'), + ('python_version >= "3.6"', 'python_version < "3.6"'), + ('python_version <= "3.6"', 'python_version > "3.6"'), + ('python_version > "3.6"', 'python_version <= "3.6"'), + ( + 'python_version > "3.6" or implementation_name == "pypy"', + 'python_version <= "3.6" and implementation_name != "pypy"', + ), + ( + 'python_version <= "3.6" and implementation_name != "pypy"', + 'python_version > "3.6" or implementation_name == "pypy"', + ), + ( + 'python_version ~= "3.6"', + 'python_version < "3.6" or python_version >= "4.0"', + ), + ( + 'python_full_version ~= "3.6.3"', + 'python_full_version < "3.6.3" or python_full_version >= "3.7.0"', + ), + ], +) +def test_invert(marker: str, inverse: str) -> None: + m = parse_marker(marker) + + assert parse_marker(inverse) == m.invert() + + +@pytest.mark.parametrize( + "marker, expected", + [ + ( + ( + 'python_version >= "3.6" or python_version < "3.7" or python_version <' + ' "3.6"' + ), + 'python_version >= "3.6" or python_version < "3.7"', + ), + ], +) +def test_union_should_drop_markers_if_their_complement_is_present( + marker: str, expected: str +) -> None: + m = parse_marker(marker) + + assert parse_marker(expected) == m + + +@pytest.mark.parametrize( + "scheme, marker, expected", + [ + ("empty", EmptyMarker(), EmptyMarker()), + ("any", AnyMarker(), AnyMarker()), + ( + "A_", + SingleMarker("python_version", ">=3.7"), + SingleMarker("python_version", ">=3.7"), + ), + ( + "AB_", + MultiMarker( + SingleMarker("python_version", ">=3.7"), + SingleMarker("python_version", "<3.9"), + ), + MultiMarker( + SingleMarker("python_version", ">=3.7"), + SingleMarker("python_version", "<3.9"), + ), + ), + ( + "A+B_", + MarkerUnion( + SingleMarker("python_version", "<3.7"), + SingleMarker("python_version", ">=3.9"), + ), + MarkerUnion( + SingleMarker("python_version", "<3.7"), + SingleMarker("python_version", ">=3.9"), + ), + ), + ( + "AB+AC_", + MarkerUnion( + MultiMarker( + SingleMarker("python_version", ">=3.7"), + SingleMarker("python_version", "<3.9"), + ), + MultiMarker( + SingleMarker("python_version", ">=3.7"), + SingleMarker("sys_platform", "linux"), + ), + 
), + MarkerUnion( + MultiMarker( + SingleMarker("python_version", ">=3.7"), + SingleMarker("python_version", "<3.9"), + ), + MultiMarker( + SingleMarker("python_version", ">=3.7"), + SingleMarker("sys_platform", "linux"), + ), + ), + ), + ( + "A(B+C)_AB+AC", + MultiMarker( + SingleMarker("python_version", ">=3.7"), + MarkerUnion( + SingleMarker("python_version", "<3.9"), + SingleMarker("sys_platform", "linux"), + ), + ), + MarkerUnion( + MultiMarker( + SingleMarker("python_version", ">=3.7"), + SingleMarker("python_version", "<3.9"), + ), + MultiMarker( + SingleMarker("python_version", ">=3.7"), + SingleMarker("sys_platform", "linux"), + ), + ), + ), + ( + "(A+B)(C+D)_AC+AD+BC+BD", + MultiMarker( + MarkerUnion( + SingleMarker("python_version", ">=3.7"), + SingleMarker("sys_platform", "win32"), + ), + MarkerUnion( + SingleMarker("python_version", "<3.9"), + SingleMarker("sys_platform", "linux"), + ), + ), + MarkerUnion( + MultiMarker( + SingleMarker("python_version", ">=3.7"), + SingleMarker("python_version", "<3.9"), + ), + MultiMarker( + SingleMarker("python_version", ">=3.7"), + SingleMarker("sys_platform", "linux"), + ), + MultiMarker( + SingleMarker("sys_platform", "win32"), + SingleMarker("python_version", "<3.9"), + ), + ), + ), + ( + "A(B+C)+(D+E)(F+G)_AB+AC+DF+DG+EF+DG", + MarkerUnion( + MultiMarker( + SingleMarker("sys_platform", "win32"), + MarkerUnion( + SingleMarker("python_version", "<3.7"), + SingleMarker("python_version", ">=3.9"), + ), + ), + MultiMarker( + MarkerUnion( + SingleMarker("python_version", "<3.8"), + SingleMarker("python_version", ">=3.9"), + ), + MarkerUnion( + SingleMarker("sys_platform", "linux"), + SingleMarker("python_version", ">=3.9"), + ), + ), + ), + MarkerUnion( + MultiMarker( + SingleMarker("sys_platform", "win32"), + SingleMarker("python_version", "<3.7"), + ), + SingleMarker("python_version", ">=3.9"), + MultiMarker( + SingleMarker("python_version", "<3.8"), + SingleMarker("sys_platform", "linux"), + ), + ), + ), + ( + "(A+B(C+D))(E+F)_AE+AF+BCE+BCF+BDE+BDF", + MultiMarker( + MarkerUnion( + SingleMarker("python_version", ">=3.9"), + MultiMarker( + SingleMarker("implementation_name", "cpython"), + MarkerUnion( + SingleMarker("python_version", "<3.7"), + SingleMarker("python_version", ">=3.8"), + ), + ), + ), + MarkerUnion( + SingleMarker("sys_platform", "win32"), + SingleMarker("sys_platform", "linux"), + ), + ), + MarkerUnion( + MultiMarker( + SingleMarker("python_version", ">=3.9"), + SingleMarker("sys_platform", "win32"), + ), + MultiMarker( + SingleMarker("python_version", ">=3.9"), + SingleMarker("sys_platform", "linux"), + ), + MultiMarker( + SingleMarker("implementation_name", "cpython"), + SingleMarker("python_version", "<3.7"), + SingleMarker("sys_platform", "win32"), + ), + MultiMarker( + SingleMarker("implementation_name", "cpython"), + SingleMarker("python_version", "<3.7"), + SingleMarker("sys_platform", "linux"), + ), + MultiMarker( + SingleMarker("implementation_name", "cpython"), + SingleMarker("python_version", ">=3.8"), + SingleMarker("sys_platform", "win32"), + ), + MultiMarker( + SingleMarker("implementation_name", "cpython"), + SingleMarker("python_version", ">=3.8"), + SingleMarker("sys_platform", "linux"), + ), + ), + ), + ], +) +def test_dnf(scheme: str, marker: BaseMarker, expected: BaseMarker) -> None: + assert dnf(marker) == expected + + +def test_single_markers_are_found_in_complex_intersection() -> None: + m1 = parse_marker('implementation_name != "pypy" and python_version <= "3.6"') + m2 = parse_marker( + 
'python_version >= "3.6" and python_version < "4.0" and implementation_name =='
+        ' "cpython"'
+    )
+    intersection = m1.intersect(m2)
+    assert (
+        str(intersection)
+        == 'implementation_name == "cpython" and python_version == "3.6"'
+    )
+
+
+@pytest.mark.parametrize(
+    "python_version, python_full_version, "
+    "expected_intersection_version, expected_union_version",
+    [
+        # python_version > 3.6 (equal to python_full_version >= 3.7.0)
+        ('> "3.6"', '> "3.5.2"', '> "3.6"', '> "3.5.2"'),
+        ('> "3.6"', '>= "3.5.2"', '> "3.6"', '>= "3.5.2"'),
+        ('> "3.6"', '> "3.6.2"', '> "3.6"', '> "3.6.2"'),
+        ('> "3.6"', '>= "3.6.2"', '> "3.6"', '>= "3.6.2"'),
+        ('> "3.6"', '> "3.7.0"', '> "3.7.0"', '> "3.6"'),
+        ('> "3.6"', '>= "3.7.0"', '> "3.6"', '> "3.6"'),
+        ('> "3.6"', '> "3.7.1"', '> "3.7.1"', '> "3.6"'),
+        ('> "3.6"', '>= "3.7.1"', '>= "3.7.1"', '> "3.6"'),
+        ('> "3.6"', '== "3.6.2"', "<empty>", None),
+        ('> "3.6"', '== "3.7.0"', '== "3.7.0"', '> "3.6"'),
+        ('> "3.6"', '== "3.7.1"', '== "3.7.1"', '> "3.6"'),
+        ('> "3.6"', '!= "3.6.2"', '> "3.6"', '!= "3.6.2"'),
+        ('> "3.6"', '!= "3.7.0"', '> "3.7.0"', ""),
+        ('> "3.6"', '!= "3.7.1"', None, ""),
+        ('> "3.6"', '< "3.7.0"', "<empty>", ""),
+        ('> "3.6"', '<= "3.7.0"', '== "3.7.0"', ""),
+        ('> "3.6"', '< "3.7.1"', None, ""),
+        ('> "3.6"', '<= "3.7.1"', None, ""),
+        # python_version >= 3.6 (equal to python_full_version >= 3.6.0)
+        ('>= "3.6"', '> "3.5.2"', '>= "3.6"', '> "3.5.2"'),
+        ('>= "3.6"', '>= "3.5.2"', '>= "3.6"', '>= "3.5.2"'),
+        ('>= "3.6"', '> "3.6.0"', '> "3.6.0"', '>= "3.6"'),
+        ('>= "3.6"', '>= "3.6.0"', '>= "3.6"', '>= "3.6"'),
+        ('>= "3.6"', '> "3.6.1"', '> "3.6.1"', '>= "3.6"'),
+        ('>= "3.6"', '>= "3.6.1"', '>= "3.6.1"', '>= "3.6"'),
+        ('>= "3.6"', '== "3.5.2"', "<empty>", None),
+        ('>= "3.6"', '== "3.6.0"', '== "3.6.0"', '>= "3.6"'),
+        ('>= "3.6"', '!= "3.5.2"', '>= "3.6"', '!= "3.5.2"'),
+        ('>= "3.6"', '!= "3.6.0"', '> "3.6.0"', ""),
+        ('>= "3.6"', '!= "3.6.1"', None, ""),
+        ('>= "3.6"', '!= "3.7.1"', None, ""),
+        ('>= "3.6"', '< "3.6.0"', "<empty>", ""),
+        ('>= "3.6"', '<= "3.6.0"', '== "3.6.0"', ""),
+        ('>= "3.6"', '< "3.6.1"', None, ""),  # '== "3.6.0"'
+        ('>= "3.6"', '<= "3.6.1"', None, ""),
+        # python_version < 3.6 (equal to python_full_version < 3.6.0)
+        ('< "3.6"', '< "3.5.2"', '< "3.5.2"', '< "3.6"'),
+        ('< "3.6"', '<= "3.5.2"', '<= "3.5.2"', '< "3.6"'),
+        ('< "3.6"', '< "3.6.0"', '< "3.6"', '< "3.6"'),
+        ('< "3.6"', '<= "3.6.0"', '< "3.6"', '<= "3.6.0"'),
+        ('< "3.6"', '< "3.6.1"', '< "3.6"', '< "3.6.1"'),
+        ('< "3.6"', '<= "3.6.1"', '< "3.6"', '<= "3.6.1"'),
+        ('< "3.6"', '== "3.5.2"', '== "3.5.2"', '< "3.6"'),
+        ('< "3.6"', '== "3.6.0"', "<empty>", '<= "3.6.0"'),
+        ('< "3.6"', '!= "3.5.2"', None, ""),
+        ('< "3.6"', '!= "3.6.0"', '< "3.6"', '!= "3.6.0"'),
+        ('< "3.6"', '> "3.6.0"', "<empty>", '!= "3.6.0"'),
+        ('< "3.6"', '>= "3.6.0"', "<empty>", ""),
+        ('< "3.6"', '> "3.5.2"', None, ""),
+        ('< "3.6"', '>= "3.5.2"', None, ""),
+        # python_version <= 3.6 (equal to python_full_version < 3.7.0)
+        ('<= "3.6"', '< "3.6.1"', '< "3.6.1"', '<= "3.6"'),
+        ('<= "3.6"', '<= "3.6.1"', '<= "3.6.1"', '<= "3.6"'),
+        ('<= "3.6"', '< "3.7.0"', '<= "3.6"', '<= "3.6"'),
+        ('<= "3.6"', '<= "3.7.0"', '<= "3.6"', '<= "3.7.0"'),
+        ('<= "3.6"', '== "3.6.1"', '== "3.6.1"', '<= "3.6"'),
+        ('<= "3.6"', '== "3.7.0"', "<empty>", '<= "3.7.0"'),
+        ('<= "3.6"', '!= "3.6.1"', None, ""),
+        ('<= "3.6"', '!= "3.7.0"', '<= "3.6"', '!= "3.7.0"'),
+        ('<= "3.6"', '> "3.7.0"', "<empty>", '!= "3.7.0"'),
+        ('<= "3.6"', '>= "3.7.0"', "<empty>", ""),
+        ('<= "3.6"', '> "3.6.2"', None, ""),
+        ('<= "3.6"', '>= "3.6.2"', None, ""),
+        # 
python_version == 3.6 # noqa: E800
+        # (equal to python_full_version >= 3.6.0 and python_full_version < 3.7.0)
+        ('== "3.6"', '< "3.5.2"', "<empty>", None),
+        ('== "3.6"', '<= "3.5.2"', "<empty>", None),
+        ('== "3.6"', '> "3.5.2"', '== "3.6"', '> "3.5.2"'),
+        ('== "3.6"', '>= "3.5.2"', '== "3.6"', '>= "3.5.2"'),
+        ('== "3.6"', '!= "3.5.2"', '== "3.6"', '!= "3.5.2"'),
+        ('== "3.6"', '< "3.6.0"', "<empty>", '< "3.7.0"'),
+        ('== "3.6"', '<= "3.6.0"', '== "3.6.0"', '< "3.7.0"'),
+        ('== "3.6"', '> "3.6.0"', None, '>= "3.6.0"'),
+        ('== "3.6"', '>= "3.6.0"', '== "3.6"', '>= "3.6.0"'),
+        ('== "3.6"', '!= "3.6.0"', None, ""),
+        ('== "3.6"', '< "3.6.1"', None, '< "3.7.0"'),
+        ('== "3.6"', '<= "3.6.1"', None, '< "3.7.0"'),
+        ('== "3.6"', '> "3.6.1"', None, '>= "3.6.0"'),
+        ('== "3.6"', '>= "3.6.1"', None, '>= "3.6.0"'),
+        ('== "3.6"', '!= "3.6.1"', None, ""),
+        ('== "3.6"', '< "3.7.0"', '== "3.6"', '< "3.7.0"'),
+        ('== "3.6"', '<= "3.7.0"', '== "3.6"', '<= "3.7.0"'),
+        ('== "3.6"', '> "3.7.0"', "<empty>", None),
+        ('== "3.6"', '>= "3.7.0"', "<empty>", '>= "3.6.0"'),
+        ('== "3.6"', '!= "3.7.0"', '== "3.6"', '!= "3.7.0"'),
+        ('== "3.6"', '<= "3.7.1"', '== "3.6"', '<= "3.7.1"'),
+        ('== "3.6"', '< "3.7.1"', '== "3.6"', '< "3.7.1"'),
+        ('== "3.6"', '> "3.7.1"', "<empty>", None),
+        ('== "3.6"', '>= "3.7.1"', "<empty>", None),
+        ('== "3.6"', '!= "3.7.1"', '== "3.6"', '!= "3.7.1"'),
+        # python_version != 3.6 # noqa: E800
+        # (equal to python_full_version < 3.6.0 or python_full_version >= 3.7.0)
+        ('!= "3.6"', '< "3.5.2"', '< "3.5.2"', '!= "3.6"'),
+        ('!= "3.6"', '<= "3.5.2"', '<= "3.5.2"', '!= "3.6"'),
+        ('!= "3.6"', '> "3.5.2"', None, ""),
+        ('!= "3.6"', '>= "3.5.2"', None, ""),
+        ('!= "3.6"', '!= "3.5.2"', None, ""),
+        ('!= "3.6"', '< "3.6.0"', '< "3.6.0"', '!= "3.6"'),
+        ('!= "3.6"', '<= "3.6.0"', '< "3.6.0"', None),
+        ('!= "3.6"', '> "3.6.0"', '>= "3.7.0"', '!= "3.6.0"'),
+        ('!= "3.6"', '>= "3.6.0"', '>= "3.7.0"', ""),
+        ('!= "3.6"', '!= "3.6.0"', '!= "3.6"', '!= "3.6.0"'),
+        ('!= "3.6"', '< "3.6.1"', '< "3.6.0"', None),
+        ('!= "3.6"', '<= "3.6.1"', '< "3.6.0"', None),
+        ('!= "3.6"', '> "3.6.1"', '>= "3.7.0"', None),
+        ('!= "3.6"', '>= "3.6.1"', '>= "3.7.0"', None),
+        ('!= "3.6"', '!= "3.6.1"', '!= "3.6"', '!= "3.6.1"'),
+        ('!= "3.6"', '< "3.7.0"', '< "3.6.0"', ""),
+        ('!= "3.6"', '<= "3.7.0"', None, ""),
+        ('!= "3.6"', '> "3.7.0"', '> "3.7.0"', '!= "3.6"'),
+        ('!= "3.6"', '>= "3.7.0"', '>= "3.7.0"', '!= "3.6"'),
+        ('!= "3.6"', '!= "3.7.0"', None, ""),
+        ('!= "3.6"', '<= "3.7.1"', None, ""),
+        ('!= "3.6"', '< "3.7.1"', None, ""),
+        ('!= "3.6"', '> "3.7.1"', '> "3.7.1"', '!= "3.6"'),
+        ('!= "3.6"', '>= "3.7.1"', '>= "3.7.1"', '!= "3.6"'),
+        ('!= "3.6"', '!= "3.7.1"', None, ""),
+    ],
+)
+def test_merging_python_version_and_python_full_version(
+    python_version: str,
+    python_full_version: str,
+    expected_intersection_version: str,
+    expected_union_version: str,
+) -> None:
+    m = f"python_version {python_version}"
+    m2 = f"python_full_version {python_full_version}"
+
+    def get_expected_marker(expected_version: str, op: str) -> str:
+        if expected_version is None:
+            expected = f"{m} {op} {m2}"
+        elif expected_version in ("", "<empty>"):
+            expected = expected_version
+        else:
+            expected_marker_name = (
+                "python_version"
+                if expected_version.count(".") < 2
+                else "python_full_version"
+            )
+            expected = f"{expected_marker_name} {expected_version}"
+        return expected
+
+    expected_intersection = get_expected_marker(expected_intersection_version, "and")
+    expected_union = get_expected_marker(expected_union_version, "or")
+
+    intersection = 
parse_marker(m).intersect(parse_marker(m2)) + assert str(intersection) == expected_intersection + + union = parse_marker(m).union(parse_marker(m2)) + assert str(union) == expected_union diff --git a/tests/version/test_requirements.py b/tests/version/test_requirements.py new file mode 100644 index 0000000..5c0e289 --- /dev/null +++ b/tests/version/test_requirements.py @@ -0,0 +1,134 @@ +from __future__ import annotations + +import re + +from typing import Any + +import pytest + +from poetry.core.constraints.version import parse_constraint +from poetry.core.version.requirements import InvalidRequirement +from poetry.core.version.requirements import Requirement + + +def assert_requirement( + req: Requirement, + name: str, + url: str | None = None, + extras: list[str] | None = None, + constraint: str = "*", + marker: str | None = None, +) -> None: + if extras is None: + extras = [] + + assert name == req.name + assert url == req.url + assert sorted(extras) == sorted(req.extras) + assert parse_constraint(constraint) == req.constraint + + if marker: + assert marker == str(req.marker) + + +@pytest.mark.parametrize( + ["string", "expected"], + [ + ("A", {"name": "A"}), + ("aa", {"name": "aa"}), + ("name", {"name": "name"}), + ("foo-bar.quux_baz", {"name": "foo-bar.quux_baz"}), + ("name>=3", {"name": "name", "constraint": ">=3"}), + ("name>=3.*", {"name": "name", "constraint": ">=3.0"}), + ("name<3.*", {"name": "name", "constraint": "<3.0"}), + ("name>3.5.*", {"name": "name", "constraint": ">3.5"}), + ("name==1.0.post1", {"name": "name", "constraint": "==1.0.post1"}), + ("name==1.2.0b1.dev0", {"name": "name", "constraint": "==1.2.0b1.dev0"}), + ( + "name>=1.2.3;python_version=='2.6'", + { + "name": "name", + "constraint": ">=1.2.3", + "marker": 'python_version == "2.6"', + }, + ), + ("name (==4)", {"name": "name", "constraint": "==4"}), + ("name>=2,<3", {"name": "name", "constraint": ">=2,<3"}), + ("name >=2, <3", {"name": "name", "constraint": ">=2,<3"}), + # PEP 440: https://www.python.org/dev/peps/pep-0440/#compatible-release + ("name (~=3.2)", {"name": "name", "constraint": ">=3.2.0,<4.0"}), + ("name (~=3.2.1)", {"name": "name", "constraint": ">=3.2.1,<3.3.0"}), + # Extras + ("foobar [quux,bar]", {"name": "foobar", "extras": ["quux", "bar"]}), + ("foo[]", {"name": "foo"}), + # Url + ("foo @ http://example.com", {"name": "foo", "url": "http://example.com"}), + ( + 'foo @ http://example.com ; os_name=="a"', + {"name": "foo", "url": "http://example.com", "marker": 'os_name == "a"'}, + ), + ( + "name @ file:///absolute/path", + {"name": "name", "url": "file:///absolute/path"}, + ), + ( + "name @ file://.", + {"name": "name", "url": "file://."}, + ), + ( + "name [fred,bar] @ http://foo.com ; python_version=='2.7'", + { + "name": "name", + "url": "http://foo.com", + "extras": ["fred", "bar"], + "marker": 'python_version == "2.7"', + }, + ), + ( + ( + "foo @ https://example.com/name;v=1.1/?query=foo&bar=baz#blah ;" + " python_version=='3.4'" + ), + { + "name": "foo", + "url": "https://example.com/name;v=1.1/?query=foo&bar=baz#blah", + "marker": 'python_version == "3.4"', + }, + ), + ( + ( + 'foo (>=1.2.3) ; python_version >= "2.7" and python_version < "2.8" or' + ' python_version >= "3.4" and python_version < "3.5"' + ), + { + "name": "foo", + "constraint": ">=1.2.3", + "marker": ( + 'python_version >= "2.7" and python_version < "2.8" or' + ' python_version >= "3.4" and python_version < "3.5"' + ), + }, + ), + ], +) +def test_requirement(string: str, expected: dict[str, Any]) -> None: + req = 
Requirement(string) + + assert_requirement(req, **expected) + + +@pytest.mark.parametrize( + ["string", "exception"], + [ + ("foo!", "Unexpected character at column 4\n\nfoo!\n ^\n"), + ("foo (>=bar)", 'invalid version constraint ">=bar"'), + ("name @ file:.", "invalid URL"), + ("name @ file:/.", "invalid URL"), + ], +) +def test_invalid_requirement(string: str, exception: str) -> None: + with pytest.raises( + InvalidRequirement, + match=re.escape(f"The requirement is invalid: {exception}"), + ): + Requirement(string) diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..082a616 --- /dev/null +++ b/tox.ini @@ -0,0 +1,22 @@ +[tox] +minversion = 3.3.0 +isolated_build = True +envlist = py37, py38, py39, py310, pypy3, integration + +[testenv] +whitelist_externals = poetry +skip_install = true +commands = + poetry install -v + poetry run pytest {posargs} tests/ + + +[testenv:integration] +basepython = python3 +skip_install = false +deps = + pytest + build + virtualenv +commands = + pytest --integration {posargs} tests/integration diff --git a/vendors/patches/jsonschema.patch b/vendors/patches/jsonschema.patch new file mode 100644 index 0000000..3af40eb --- /dev/null +++ b/vendors/patches/jsonschema.patch @@ -0,0 +1,36 @@ +diff --git b/src/poetry/core/_vendor/jsonschema/_utils.py a/src/poetry/core/_vendor/jsonschema/_utils.py +index a2ad5a9..d4f5697 100644 +--- b/src/poetry/core/_vendor/jsonschema/_utils.py ++++ a/src/poetry/core/_vendor/jsonschema/_utils.py +@@ -2,15 +2,8 @@ from collections.abc import Mapping, MutableMapping, Sequence + from urllib.parse import urlsplit + import itertools + import json ++import os + import re +-import sys +- +-# The files() API was added in Python 3.9. +-if sys.version_info >= (3, 9): # pragma: no cover +- from importlib import resources +-else: # pragma: no cover +- import importlib_resources as resources # type: ignore +- + + class URIDict(MutableMapping): + """ +@@ -56,9 +49,12 @@ def load_schema(name): + """ + Load a schema from ./schemas/``name``.json and return it. 
+ """ ++ with open( ++ os.path.join(os.path.dirname(__file__), "schemas", "{0}.json".format(name)), ++ encoding="utf-8" ++ ) as f: ++ data = f.read() + +- path = resources.files(__package__).joinpath(f"schemas/{name}.json") +- data = path.read_text(encoding="utf-8") + return json.loads(data) + + diff --git a/vendors/poetry.lock b/vendors/poetry.lock new file mode 100644 index 0000000..ddafbf9 --- /dev/null +++ b/vendors/poetry.lock @@ -0,0 +1,219 @@ +[[package]] +name = "attrs" +version = "22.1.0" +description = "Classes Without Boilerplate" +category = "main" +optional = false +python-versions = ">=3.5" + +[package.extras] +dev = ["cloudpickle", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "mypy (>=0.900,!=0.940)", "pre-commit", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "sphinx", "sphinx-notfound-page", "zope.interface"] +docs = ["furo", "sphinx", "sphinx-notfound-page", "zope.interface"] +tests = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "zope.interface"] +tests-no-zope = ["cloudpickle", "coverage[toml] (>=5.0.2)", "hypothesis", "mypy (>=0.900,!=0.940)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins"] + +[[package]] +name = "importlib-metadata" +version = "5.0.0" +description = "Read metadata from Python packages" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +typing-extensions = {version = ">=3.6.4", markers = "python_version < \"3.8\""} +zipp = ">=0.5" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] +perf = ["ipython"] +testing = ["flake8 (<5)", "flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)"] + +[[package]] +name = "importlib-resources" +version = "5.10.0" +description = "Read resources from Python packages" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] +testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + +[[package]] +name = "jsonschema" +version = "4.17.0" +description = "An implementation of JSON Schema validation for Python" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.dependencies] +attrs = ">=17.4.0" +importlib-metadata = {version = "*", markers = "python_version < \"3.8\""} +importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} +pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} +pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2" +typing-extensions = {version = "*", markers = "python_version < \"3.8\""} + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "lark" +version = 
"1.1.4" +description = "a modern parsing library" +category = "main" +optional = false +python-versions = "*" + +[package.extras] +atomic-cache = ["atomicwrites"] +nearley = ["js2py"] +regex = ["regex"] + +[[package]] +name = "packaging" +version = "21.3" +description = "Core utilities for Python packages" +category = "main" +optional = false +python-versions = ">=3.6" + +[package.dependencies] +pyparsing = ">=2.0.2,<3.0.5 || >3.0.5" + +[[package]] +name = "pkgutil-resolve-name" +version = "1.3.10" +description = "Resolve a name to an object." +category = "main" +optional = false +python-versions = ">=3.6" + +[[package]] +name = "pyparsing" +version = "3.0.9" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +category = "main" +optional = false +python-versions = ">=3.6.8" + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + +[[package]] +name = "pyrsistent" +version = "0.19.2" +description = "Persistent/Functional/Immutable data structures" +category = "main" +optional = false +python-versions = ">=3.7" + +[[package]] +name = "tomlkit" +version = "0.11.6" +description = "Style preserving TOML library" +category = "main" +optional = false +python-versions = ">=3.6" + +[[package]] +name = "typing-extensions" +version = "4.4.0" +description = "Backported and Experimental Type Hints for Python 3.7+" +category = "main" +optional = false +python-versions = ">=3.7" + +[[package]] +name = "zipp" +version = "3.10.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +category = "main" +optional = false +python-versions = ">=3.7" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)"] +testing = ["flake8 (<5)", "func-timeout", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + +[metadata] +lock-version = "1.1" +python-versions = "^3.7" +content-hash = "6cd18db509bda7b44ca6c26f16b25885b3c0c9c91ae497b774b21ce67fbf5593" + +[metadata.files] +attrs = [ + {file = "attrs-22.1.0-py2.py3-none-any.whl", hash = "sha256:86efa402f67bf2df34f51a335487cf46b1ec130d02b8d39fd248abfd30da551c"}, + {file = "attrs-22.1.0.tar.gz", hash = "sha256:29adc2665447e5191d0e7c568fde78b21f9672d344281d0c6e1ab085429b22b6"}, +] +importlib-metadata = [ + {file = "importlib_metadata-5.0.0-py3-none-any.whl", hash = "sha256:ddb0e35065e8938f867ed4928d0ae5bf2a53b7773871bfe6bcc7e4fcdc7dea43"}, + {file = "importlib_metadata-5.0.0.tar.gz", hash = "sha256:da31db32b304314d044d3c12c79bd59e307889b287ad12ff387b3500835fc2ab"}, +] +importlib-resources = [ + {file = "importlib_resources-5.10.0-py3-none-any.whl", hash = "sha256:ee17ec648f85480d523596ce49eae8ead87d5631ae1551f913c0100b5edd3437"}, + {file = "importlib_resources-5.10.0.tar.gz", hash = "sha256:c01b1b94210d9849f286b86bb51bcea7cd56dde0600d8db721d7b81330711668"}, +] +jsonschema = [ + {file = "jsonschema-4.17.0-py3-none-any.whl", hash = "sha256:f660066c3966db7d6daeaea8a75e0b68237a48e51cf49882087757bb59916248"}, + {file = "jsonschema-4.17.0.tar.gz", hash = "sha256:5bfcf2bca16a087ade17e02b282d34af7ccd749ef76241e7f9bd7c0cb8a9424d"}, +] +lark = [ + {file = "lark-1.1.4-py3-none-any.whl", hash = "sha256:a42f9f18bdc9d5571a371ae658548e81e78d1642c2145cc3b663e0bf2e9e7eae"}, + {file = "lark-1.1.4.tar.gz", hash = "sha256:eee86062b149600ef62de0d8dfd38cf85ffc737e16911e7d8c18880f8c5b1333"}, +] 
+packaging = [ + {file = "packaging-21.3-py3-none-any.whl", hash = "sha256:ef103e05f519cdc783ae24ea4e2e0f508a9c99b2d4969652eed6a2e1ea5bd522"}, + {file = "packaging-21.3.tar.gz", hash = "sha256:dd47c42927d89ab911e606518907cc2d3a1f38bbd026385970643f9c5b8ecfeb"}, +] +pkgutil-resolve-name = [ + {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, + {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, +] +pyparsing = [ + {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, + {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, +] +pyrsistent = [ + {file = "pyrsistent-0.19.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d6982b5a0237e1b7d876b60265564648a69b14017f3b5f908c5be2de3f9abb7a"}, + {file = "pyrsistent-0.19.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:187d5730b0507d9285a96fca9716310d572e5464cadd19f22b63a6976254d77a"}, + {file = "pyrsistent-0.19.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:055ab45d5911d7cae397dc418808d8802fb95262751872c841c170b0dbf51eed"}, + {file = "pyrsistent-0.19.2-cp310-cp310-win32.whl", hash = "sha256:456cb30ca8bff00596519f2c53e42c245c09e1a4543945703acd4312949bfd41"}, + {file = "pyrsistent-0.19.2-cp310-cp310-win_amd64.whl", hash = "sha256:b39725209e06759217d1ac5fcdb510e98670af9e37223985f330b611f62e7425"}, + {file = "pyrsistent-0.19.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:2aede922a488861de0ad00c7630a6e2d57e8023e4be72d9d7147a9fcd2d30712"}, + {file = "pyrsistent-0.19.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:879b4c2f4d41585c42df4d7654ddffff1239dc4065bc88b745f0341828b83e78"}, + {file = "pyrsistent-0.19.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c43bec251bbd10e3cb58ced80609c5c1eb238da9ca78b964aea410fb820d00d6"}, + {file = "pyrsistent-0.19.2-cp37-cp37m-win32.whl", hash = "sha256:d690b18ac4b3e3cab73b0b7aa7dbe65978a172ff94970ff98d82f2031f8971c2"}, + {file = "pyrsistent-0.19.2-cp37-cp37m-win_amd64.whl", hash = "sha256:3ba4134a3ff0fc7ad225b6b457d1309f4698108fb6b35532d015dca8f5abed73"}, + {file = "pyrsistent-0.19.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a178209e2df710e3f142cbd05313ba0c5ebed0a55d78d9945ac7a4e09d923308"}, + {file = "pyrsistent-0.19.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e371b844cec09d8dc424d940e54bba8f67a03ebea20ff7b7b0d56f526c71d584"}, + {file = "pyrsistent-0.19.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:111156137b2e71f3a9936baf27cb322e8024dac3dc54ec7fb9f0bcf3249e68bb"}, + {file = "pyrsistent-0.19.2-cp38-cp38-win32.whl", hash = "sha256:e5d8f84d81e3729c3b506657dddfe46e8ba9c330bf1858ee33108f8bb2adb38a"}, + {file = "pyrsistent-0.19.2-cp38-cp38-win_amd64.whl", hash = "sha256:9cd3e9978d12b5d99cbdc727a3022da0430ad007dacf33d0bf554b96427f33ab"}, + {file = "pyrsistent-0.19.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:f1258f4e6c42ad0b20f9cfcc3ada5bd6b83374516cd01c0960e3cb75fdca6770"}, + {file = "pyrsistent-0.19.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:21455e2b16000440e896ab99e8304617151981ed40c29e9507ef1c2e4314ee95"}, + {file = "pyrsistent-0.19.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:bfd880614c6237243ff53a0539f1cb26987a6dc8ac6e66e0c5a40617296a045e"}, + {file = "pyrsistent-0.19.2-cp39-cp39-win32.whl", hash = "sha256:71d332b0320642b3261e9fee47ab9e65872c2bd90260e5d225dabeed93cbd42b"}, + {file = "pyrsistent-0.19.2-cp39-cp39-win_amd64.whl", hash = "sha256:dec3eac7549869365fe263831f576c8457f6c833937c68542d08fde73457d291"}, + {file = "pyrsistent-0.19.2-py3-none-any.whl", hash = "sha256:ea6b79a02a28550c98b6ca9c35b9f492beaa54d7c5c9e9949555893c8a9234d0"}, + {file = "pyrsistent-0.19.2.tar.gz", hash = "sha256:bfa0351be89c9fcbcb8c9879b826f4353be10f58f8a677efab0c017bf7137ec2"}, +] +tomlkit = [ + {file = "tomlkit-0.11.6-py3-none-any.whl", hash = "sha256:07de26b0d8cfc18f871aec595fda24d95b08fef89d147caa861939f37230bf4b"}, + {file = "tomlkit-0.11.6.tar.gz", hash = "sha256:71b952e5721688937fb02cf9d354dbcf0785066149d2855e44531ebdd2b65d73"}, +] +typing-extensions = [ + {file = "typing_extensions-4.4.0-py3-none-any.whl", hash = "sha256:16fa4864408f655d35ec496218b85f79b3437c829e93320c7c9215ccfd92489e"}, + {file = "typing_extensions-4.4.0.tar.gz", hash = "sha256:1511434bb92bf8dd198c12b1cc812e800d4181cfcb867674e0f8279cc93087aa"}, +] +zipp = [ + {file = "zipp-3.10.0-py3-none-any.whl", hash = "sha256:4fcb6f278987a6605757302a6e40e896257570d11c51628968ccb2a47e80c6c1"}, + {file = "zipp-3.10.0.tar.gz", hash = "sha256:7a7262fd930bd3e36c50b9a64897aec3fafff3dfdeec9623ae22b40e93f99bb8"}, +] diff --git a/vendors/pyproject.toml b/vendors/pyproject.toml new file mode 100644 index 0000000..ac990e1 --- /dev/null +++ b/vendors/pyproject.toml @@ -0,0 +1,32 @@ +[tool.poetry] +name = "vendors" +version = "1.0.0" +description = "Vendors" +authors = ["Sébastien Eustace "] + +license = "MIT" + +readme = "README.md" + +homepage = "https://github.com/python-poetry/core" +repository = "https://github.com/python-poetry/core" + +keywords = ["packaging", "dependency", "poetry"] + +classifiers = [ + "Topic :: Software Development :: Build Tools", + "Topic :: Software Development :: Libraries :: Python Modules" +] + +[tool.poetry.dependencies] +python = "^3.7" + +jsonschema = "^4.16.0" +lark = "^1.1.3" +packaging = ">=21.3" +tomlkit = ">=0.11.5,<1.0.0" + +# Needed by jsonschema and only at python < 3.8, but to make +# sure that it is always delivered we add an unconditional +# dependency here. +typing-extensions = "^4.4.0"